Learning Python Web Crawling (Boosting Blog View Counts)

Learning web crawling with Python.

This is my first time working with crawlers, so here is the reference blog post I followed.
Learning link: crawler usage
This post will be updated over time.

I. After using the code from the reference blog to boost view counts, I ran into several problems:

1. First, the tool is not as efficient as I expected. Here is the source code:

import urllib2
from lxml import etree
import random
import time
import json,requests
 
class CsdnSpider():
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
        'Opera/8.0 (Windows NT 5.1; U; en)',
        'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
    ]
    url_list = [
        "https://blog.csdn.net/qq_41782425/article/details/84934224",
        "https://blog.csdn.net/qq_41782425/article/category/8519763",
        "https://blog.csdn.net/qq_41782425/article/details/84993073",
        "https://me.csdn.net/qq_41782425",
        "https://me.csdn.net/download/qq_41782425",
        "https://me.csdn.net/bbs/qq_41782425"
    ]
    def __init__(self):
        self.page = 0
        self.proxy = []
    def get_proxy(self):
        self.page+=1
        headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
        request = urllib2.Request("https://www.kuaidaili.com/free/inha/"+str(self.page), headers=headers)
        html = urllib2.urlopen(request).read()
        # print html
        content = etree.HTML(html)
        ip = content.xpath('//td[@data-title="IP"]/text()')
        port = content.xpath('//td[@data-title="PORT"]/text()')
        # Pair each scraped IP with the port from the same table row
        for i in range(len(ip)):
            for p in range(len(port)):
                if i == p:
                    if ip[i] + ':' + port[p] not in self.proxy:
                        self.proxy.append(ip[i] + ':' + port[p])
        # print self.proxy
        if self.proxy:
            print "现在使用的是第" + str(self.page) + "页代理IP"
            self.spider()
 
    def spider(self):
        num = 0 # counts visits
        err_num = 0 # counts exceptions / errors
        while True:
            # Randomly pick a User-Agent and a proxy from the lists
            user_agent = random.choice(self.USER_AGENTS)
            proxy = random.choice(self.proxy)
 
            # proxy = json.loads('{"http":'+'"'+proxy+'"}')
            # print proxy
            # print type(proxy)
            referer = random.choice(self.url_list) # randomly pick the URL to visit
            headers = {
                # "Host": "blog.csdn.net",
                "Connection": "keep-alive",
                "Cache-Control": "max-age=0",
                "Upgrade-Insecure-Requests": "1",
                # "User-Agent": user_agent,
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                # "Referer": "https://blog.csdn.net/qq_41782425/article/details/84934224",
                # "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Cookie": "your cookie"
            }
            try:
                # Build a ProxyHandler object; the argument is a dict of proxy type to proxy server IP:PORT
                httpproxy_handler = urllib2.ProxyHandler({"http": proxy})
                opener = urllib2.build_opener(httpproxy_handler)
                urllib2.install_opener(opener)
                request = urllib2.Request(referer,headers=headers)
                request.add_header("User-Agent", user_agent)
                response = urllib2.urlopen(request)
                html = response.read()
                # Parse the response string into an HTML document with etree.HTML
                content = etree.HTML(html)
                # Use XPath to extract the read count
                read_num = content.xpath('//span[@class="read-count"]/text()')
                # Join the result list into a single string
                new_read_num = ''.join(read_num)
                # The XPath only matches article pages such as blog.csdn.net/qq_41782425/article/details/84934224, so other pages return an empty result
                if len(new_read_num) != 0:
                    print new_read_num
 
                num += 1
                print '第' + str(num) + '次访问'
                print response.url + " 代理ip: " + str(proxy)
                # print request.headers
                time.sleep(1)
                # Once the visit count passes 100, exit the loop; get_proxy is then called to fetch the next page of proxies
                if num > 100:
                    break
            except Exception as result:
                err_num+=1
                print "错误信息(%d):%s"%(err_num,result)
                # After 30 or more errors, reset the proxy page counter, start fetching proxy IPs again from page 1, and exit the loop
                if err_num >=30:
                    self.__init__()
                    break
            # url = "https://blog.csdn.net/qq_41782425/article/details/84934224"
            # try:
            #     response = requests.get(url,headers=headers,proxies={"http":""})
            #     num += 1
            #     print '第' + str(num) + '次访问'
            #     print response.url
            # except Exception as result:
            #     err_num+=1
            #     print "错误信息(%d):%s"%(err_num,result)
            #     if err_num >=30:
            #         break
        # When the loop exits, get_proxy runs again to fetch a fresh batch of proxy IPs
        print "正在重新获取代理IP"
        self.get_proxy()
 
 
if __name__ == "__main__":
    CsdnSpider().get_proxy()
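A small aside on the code above: the nested loop that pairs IPs with ports compares every index against every other index, while pairing row by row with zip does the same thing more directly. A tiny sketch of the equivalent, assuming the IP and PORT lists returned by the two XPath queries line up one to one; the helper name is just for illustration:

def pair_proxies(ips, ports, existing=None):
    # Pair each scraped IP with the port from the same table row and
    # return "IP:PORT" strings, skipping duplicates already collected.
    proxies = list(existing) if existing else []
    for ip_addr, port_num in zip(ips, ports):
        candidate = ip_addr + ':' + port_num
        if candidate not in proxies:
            proxies.append(candidate)
    return proxies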
 
 
 
 
 

2. Here is the output from a run:

D:\PycharmProjects\Web_Crawler\venv\Scripts\python.exe D:/PycharmProjects/Web_Crawler/practice/csdnSpider.py
现在使用的是第1页代理IP
第1次访问
https://blog.csdn.net/qq_41782425/article/category/8519763 代理ip: 123.162.168.192:8088
第2次访问
https://me.csdn.net/bbs/qq_41782425 代理ip: 60.182.22.244:8118
第3次访问
https://me.csdn.net/qq_41782425 代理ip: 222.186.45.144:9000
第4次访问
https://me.csdn.net/qq_41782425 代理ip: 27.24.215.49:37644
第5次访问
https://me.csdn.net/qq_41782425 代理ip: 101.76.209.69:9000
第6次访问
https://me.csdn.net/bbs/qq_41782425 代理ip: 175.11.194.73:80
第7次访问
https://me.csdn.net/download/qq_41782425 代理ip: 111.230.254.195:47891
第8次访问
https://blog.csdn.net/qq_41782425/article/category/8519763 代理ip: 123.162.168.192:53281
第9次访问
https://me.csdn.net/bbs/qq_41782425 代理ip: 180.118.134.103:8118
阅读数:419
第10次访问
https://blog.csdn.net/qq_41782425/article/details/84934224 代理ip: 111.198.77.169:47891
第11次访问
https://me.csdn.net/qq_41782425 代理ip: 27.24.215.49:8118
阅读数:419
第12次访问
https://blog.csdn.net/qq_41782425/article/details/84934224 代理ip: 221.224.136.211:9000
第13次访问
https://me.csdn.net/qq_41782425 代理ip: 221.224.136.211:8118
第14次访问
https://me.csdn.net/download/qq_41782425 代理ip: 222.171.251.43:35101
阅读数:419
第15次访问
https://blog.csdn.net/qq_41782425/article/details/84934224 代理ip: 111.230.254.195:8118
阅读数:419
第16次访问
https://blog.csdn.net/qq_41782425/article/details/84934224 代理ip: 180.118.86.75:1080
第17次访问
https://me.csdn.net/qq_41782425 代理ip: 101.76.209.69:47891
第18次访问
https://me.csdn.net/bbs/qq_41782425 代理ip: 222.186.45.144:37644
第19次访问
https://me.csdn.net/qq_41782425 代理ip: 123.162.168.192:42164
第20次访问
https://blog.csdn.net/qq_41782425/article/category/8519763 代理ip: 180.118.134.103:47891

After swapping the target URLs for two of my own and running it locally for a while, I found that the visit counter kept climbing, but the read count grew extremely slowly: across the two URLs it only rose by about two reads per minute in total. Towards the end, some requests were even being redirected to https://passport.csdn.net/login.
I couldn't make much sense of this, so I blindly hacked at the code, and the result was even worse.
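As a sanity check, it seems worth confirming where each request actually lands, because a visit that gets bounced to the login page almost certainly isn't counted. Below is a minimal sketch in the same urllib2 style; the helper name visit_and_check and the skip logic are my own, not part of the reference script:

import urllib2

def visit_and_check(url, proxy, user_agent):
    # Fetch url through the given proxy and return the final URL after
    # redirects, or None if the request failed or landed on the login page.
    opener = urllib2.build_opener(urllib2.ProxyHandler({"http": proxy}))
    request = urllib2.Request(url, headers={"User-Agent": user_agent})
    try:
        response = opener.open(request, timeout=5)
    except Exception:
        return None
    final_url = response.geturl()  # URL after any redirects
    if final_url.startswith("https://passport.csdn.net/login"):
        return None  # bounced to the login page, so the visit should not count
    return final_url

With something like this, the 第N次访问 counter would only go up for visits that really reached the article page.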
Anyway, here is the modified code:

 
import urllib2
from lxml import etree
import random
import time
import json,requests
 
class CsdnSpider():
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
        'Opera/8.0 (Windows NT 5.1; U; en)',
        'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
    ]
    url_list = [
        "https://blog.csdn.net/One_Ok_Clock/article/details/88754136",
        "https://blog.csdn.net/One_Ok_Clock/article/details/88778799",
    ]
    def __init__(self):
        self.page = 11
        self.proxy = []
    def get_proxy(self):
        self.page+=1
        headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
        request = urllib2.Request("https://www.kuaidaili.com/free/inha/"+str(self.page), headers=headers)
        html = urllib2.urlopen(request).read()
        # print html
        content = etree.HTML(html)
        ip = content.xpath('//td[@data-title="IP"]/text()')
        port = content.xpath('//td[@data-title="PORT"]/text()')
        # Pair each scraped IP with the port from the same table row
        for i in range(len(ip)):
            for p in range(len(port)):
                if i == p:
                    if ip[i] + ':' + port[p] not in self.proxy:
                        self.proxy.append(ip[i] + ':' + port[p])
        # print self.proxy
        if self.proxy:
            print "现在使用的是第" + str(self.page) + "页代理IP"
            self.spider()
 
    def spider(self):
        num = 0 # counts visits
        err_num = 0 # counts exceptions / errors
        while True:
            # Randomly pick a User-Agent and a proxy from the lists
            user_agent = random.choice(self.USER_AGENTS)
            proxy = random.choice(self.proxy)
 
            # proxy = json.loads('{"http":'+'"'+proxy+'"}')
            # print proxy
            # print type(proxy)
            referer = random.choice(self.url_list) # randomly pick the URL to visit
            headers = {
                # "Host": "blog.csdn.net",
                "Connection": "keep-alive",
                "Cache-Control": "max-age=0",
                "Upgrade-Insecure-Requests": "1",
                # "User-Agent": user_agent,
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                # "Referer": "https://blog.csdn.net/qq_41782425/article/details/84934224",
                # "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Cookie": "your cookie"
            }
            try:
                # Build a ProxyHandler object; the argument is a dict of proxy type to proxy server IP:PORT
                httpproxy_handler = urllib2.ProxyHandler({"http": proxy})
                opener = urllib2.build_opener(httpproxy_handler)
                urllib2.install_opener(opener)
                request = urllib2.Request(referer,headers=headers)
                request.add_header("User-Agent", user_agent)
                response = urllib2.urlopen(request,timeout=1)
                html = response.read()
                # Parse the response string into an HTML document with etree.HTML
                content = etree.HTML(html)
                # Use XPath to extract the read count
                read_num = content.xpath('//span[@class="read-count"]/text()')
                # Join the result list into a single string
                new_read_num = ''.join(read_num)
                # The XPath only matches article pages such as blog.csdn.net/qq_41782425/article/details/84934224, so other pages return an empty result
                if len(new_read_num) != 0:
                    print new_read_num
 
                num += 1
                print '第' + str(num) + '次访问'
                print response.url + " 代理ip: " + str(proxy)
                # print request.headers
                time.sleep(0.1)
                # Once the visit count passes 100, exit the loop; get_proxy is then called to fetch the next page of proxies
                if num > 100:
                    break
            except Exception as result:
                err_num+=1
                print "错误信息(%d):%s"%(err_num,result)
                # After 30 or more errors, reset the proxy page counter (back to 11 here, so fetching restarts from page 12) and exit the loop
                if err_num >=30:
                    self.__init__()
                    break
            # url = "https://blog.csdn.net/qq_41782425/article/details/84934224"
            # try:
            #     response = requests.get(url,headers=headers,proxies={"http":""})
            #     num += 1
            #     print '第' + str(num) + '次访问'
            #     print response.url
            # except Exception as result:
            #     err_num+=1
            #     print "错误信息(%d):%s"%(err_num,result)
            #     if err_num >=30:
            #         break
        # When the loop exits, get_proxy runs again to fetch a fresh batch of proxy IPs
        print "正在重新获取代理IP"
        self.get_proxy()
 
 
if __name__ == "__main__":
    CsdnSpider().get_proxy()


The main changes are: proxy IPs are now fetched starting from page 12 of the proxy site, and the connection gets a timeout of 1 second (timeout=1).
The results of running the modified script are as follows:
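Looking back, the free proxies themselves are probably the bottleneck: many of them are dead, so a lot of the 1-second timeouts are spent on proxies that will never respond. One possible tweak would be to pre-check each page of proxies before the visit loop. A rough sketch, still in the same urllib2 style; the test URL and the helper name are assumptions, not something from the original script:

import urllib2

def filter_alive_proxies(proxies, test_url="https://www.csdn.net", timeout=3):
    # Keep only the proxies that can fetch test_url within the timeout,
    # so the visit loop never wastes a request on a dead proxy.
    alive = []
    for p in proxies:
        opener = urllib2.build_opener(urllib2.ProxyHandler({"http": p}))
        try:
            opener.open(test_url, timeout=timeout)
            alive.append(p)
        except Exception:
            pass
    return alive

self.proxy could then be replaced with filter_alive_proxies(self.proxy) right before spider() is called.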
[Screenshot: output from running the modified script]
Even when the connection succeeds, requests still end up at https://passport.csdn.net/login.
So I gave up on this approach.
To be continued...
