#coding:utf-8

import urllib2
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")  # Python 2 hack: make utf-8 the default codec for implicit conversions
from bs4 import BeautifulSoup

heads = {}
heads['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'

request = urllib2.Request("http://www.kugou.com", headers=heads)  # build a GET request for the Kugou home page
result = urllib2.urlopen(request)  # send the request

soup = BeautifulSoup(result.read(), 'html.parser')  # build a parse tree from the response

s = []  # the <li> elements of the song list (stays empty if the div is missing)
for i in soup.find_all("div"):  # walk every div tag
    if i.get("id") == "SongtabContent":  # look for the div whose id is SongtabContent
        s = i.find_all("li")  # collect all li tags inside it

with open(u"C://downloads//lw//a.txt","w") as f:#创建要写入文件对象
    for i in s:#遍历所有li标签对象
        f.write(u"歌曲名称为: %s    " % i.a.select(".songName")[0].text)#获取class为songName的值
        f.write(u"歌曲播放连接为: %s    " % i.a.get("href"
)) #获取标签为href的值
        f.write(u"歌曲播放时间为: %s" % i.a.select(".songTime")[0].text) #获取class为songTime的值
        f.write(os.linesep)
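
# Alternative lookup (a minimal sketch, assuming the same page structure):
# bs4's CSS selectors can fetch the target <li> elements in one call instead
# of scanning every <div>.
songs = soup.select("#SongtabContent li")
print u"CSS selector found %d song entries" % len(songs)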

def shoufu():
    import requests
    import re

    resq = requests.get("http://www.sohu.com")  # fetch the Sohu home page
    print resq.text[:100]  # print the first 100 characters of the response
    links = re.findall(r'href="(.*?)"', resq.text)  # extract every href attribute value
    print len(links)
    valid_link = []    # links worth following
    invalid_link = []  # links to discard

    for link in links:
        if re.search(r"\.(jpg|jpeg|gif|ico|png|js|css)$", link.strip()):  # static-asset links
            print 6, link
            invalid_link.append(link.strip())
            continue  # classified; move on to the next link
        elif link.strip() == "" or link.strip() == "#" or link.strip() == "/":#无效内容筛选去除
            # print 1,link
            invalid_link.append(link)
            continue
        elif link.strip().startswith("//"):  # protocol-relative link: prepend the scheme
            # print 2, link
            valid_link.append("http:" + link.strip())
            continue
        elif "javascript" in link or "mailto:" in link:  # drop javascript: handlers and mail links
            # print 3, link
            invalid_link.append(link.strip())
            continue
        elif re.match(r"/\w+", link):  # root-relative link: join it with the site root
            # print 5, link
            base = re.match(r"https?://[^/]+", resq.url.strip()).group()  # scheme + host, no trailing slash
            valid_link.append(base + link.strip())
            continue
        else:
            # print 7, link
            valid_link.append(link.strip())  # everything left is treated as an absolute link

    # for link in valid_link[:100]:
    #    print link
    print len(valid_link)

    # for link in invalid_link:
    #    print link
    print len(invalid_link)

    file_num = 1  # counter used to name the saved files
    for link in list(set(valid_link)):  # deduplicate before fetching
        # print link
        resq = requests.get(link, verify=True)  # fetch each valid link, verifying TLS certificates
        if u"篮球" in resq.text:  # keep only pages whose text mentions u"篮球" ("basketball")
            print link
            if u'meta charset="utf-8"' in resq.text:  # page declares utf-8
                with open("C:/downloads/lw/" + str(file_num) + ".html", "w") as fp:
                    fp.write(resq.text.strip().encode("utf-8"))  # save the page as utf-8
            else:
                with open("C:/downloads/lw/" + str(file_num) + ".html", "w") as fp:
                    fp.write(resq.text.strip().encode("gbk", "ignore"))  # assume gbk; drop characters it cannot encode
            file_num += 1
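            # a possible refinement (not in the original): requests already
            # guesses the page charset, so resq.apparent_encoding could drive
            # the save encoding instead of the meta-tag check above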

    print "Done!"

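if __name__ == "__main__":
    # shoufu() is defined above but never invoked; assuming it is meant to
    # run when the file is executed as a script:
    shoufu()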