Multi-threaded scraping of Qiushibaike with requests

1. Define the goals

  • Use a class, and define a separate method for each step of the process
  • Use a queue to hold the URLs that need to be requested
  • Use several queues so that the pending URLs, the fetched HTML, and the extracted content each live in their own queue; passing data between threads through queues avoids the thread-safety problems of shared variables (a minimal sketch of this pattern follows the list)

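The bullets above boil down to the standard producer-consumer pattern built on queue.Queue. Below is a minimal, self-contained sketch of just that pattern, separate from the spider itself; the names produce, consume and work_queue are illustrative and do not appear in the full code that follows.

# -*- coding:utf-8 -*-
from queue import Queue
import threading

work_queue = Queue()

def produce():
    # Producer: enqueue the work items (here, just page numbers).
    for page in range(1, 6):
        work_queue.put(page)

def consume():
    # Consumer: daemon threads loop forever and die with the main thread.
    while True:
        page = work_queue.get()
        print("processing page", page)
        # task_done() tells the queue this item is finished,
        # so work_queue.join() in the main thread can eventually return.
        work_queue.task_done()

produce()
for _ in range(3):
    t = threading.Thread(target=consume)
    t.daemon = True
    t.start()

work_queue.join()  # blocks until every queued item has been marked done

The full spider uses exactly this get() / task_done() / join() handshake, once per queue.
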
Here's the code:

# -*- coding:utf-8 -*-
import requests
import time
from lxml import etree
from queue import Queue
import json
import threading


class Qiushi(object):
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWeb\
                    Kit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
        }
        # Three queues: pending URLs, parsed HTML, and extracted content
        self.url_queue = Queue()
        self.html_queue = Queue()
        self.content_queue = Queue()


    def get_total_url(self):
        """
        Build the URL of every listing page and put it into url_queue.
        """
        url_temp = "https://www.qiushibaike.com/8hr/page/{}/"
        for i in range(1, 36):
            self.url_queue.put(url_temp.format(i))


    def parse_url(self):
        """
        Take a URL from url_queue, fetch it, parse the response with etree,
        and put the resulting element tree into html_queue.
        """
        # Queue.not_empty is a Condition object and is always truthy, so simply loop forever;
        # the daemon worker threads exit together with the main thread.
        while True:
            time.sleep(2)
            url = self.url_queue.get()
            print("requesting url:", url)
            response = requests.get(url, headers=self.headers, timeout=10)  # send the request
            html = response.content.decode()  # HTML as a string
            html = etree.HTML(html)  # parse into an lxml element tree
            self.html_queue.put(html)
            # The queue may already look empty while consumers are still processing items.
            # Signal completion explicitly; without task_done() the join() calls would block forever
            # and the program would never stop.
            self.url_queue.task_done()


    def get_content(self):
        """
        Take a parsed page from html_queue, extract every post on it,
        and put the resulting list of item dicts into content_queue.
        """
        while True:
            html = self.html_queue.get()
            total_div = html.xpath('//div[@id="content-left"]/div')  # list of div elements, one per post
            print(len(total_div))
            items = []
            for i in total_div:  # walk the post divs and collect the full details of each post
                name_nodes = i.xpath("./div[@class='author clearfix']/a[2]/h2/text()")
                # "匿名用户" is the site's label for an anonymous poster
                author_name = name_nodes[0].replace('\n', '') if name_nodes else "匿名用户"
                # print(author_name)
                if author_name != "匿名用户":
                    author_img = 'https:'+i.xpath("./div[@class='author clearfix']/a[1]/img/@src")[0]
                    author_href = "https://www.qiushibaike.com" +i.xpath("./div[@class='author clearfix']/a[2]/@href")[0]
                    author_gender = i.xpath('./div[@class="author clearfix"]//div/@class')[0].split(' ')[-1].replace("Icon", "")
                    author_age = i.xpath('./div[@class="author clearfix"]//div/text()')[0]
                else:
                    author_img = None
                    author_href = None
                    author_gender = None
                    author_age = None
                author_content = i.xpath('./a[1]/div[@class="content"]/span/text()')
                vote_nodes = i.xpath('./div[@class="stats"]/span[1]/i/text()')
                author_vote = vote_nodes[0].replace('\n', '') if vote_nodes else 0
                comment_nodes = i.xpath('./div[@class="stats"]/span[2]/a/i/text()')
                author_comment = comment_nodes[0].replace('\n', '') if comment_nodes else 0
                hot_comment_author = i.xpath('./a[@class="indexGodCmt"]/div/span[last()]/text()') or None
                if hot_comment_author is not None:
                    hot_comment = i.xpath('./a[@class="indexGodCmt"]/div/div/text()')[0].replace("\n:", "").replace("\n", "")
                    hot_comment_like_num = i.xpath('./a[@class="indexGodCmt"]//div[@class="likenum"]/text()')[0].replace("\n", '')
                else:
                    hot_comment = None
                    hot_comment_like_num = None
                item = dict(
                    author_name=author_name,
                    author_img=author_img,
                    author_href=author_href,
                    author_gender=author_gender,
                    author_age=author_age,
                    content=author_content,
                    content_vote=author_vote,
                    content_comment_numbers=author_comment,
                    hot_comment=hot_comment,
                    hot_comment_author=hot_comment_author,
                    hot_comment_like_num=hot_comment_like_num,
                )
                items.append(item)
            self.content_queue.put(items)
            self.html_queue.task_done()  # task_done() decrements the queue's count of unfinished tasks



    def save_item(self):
        """
        Take a list of items from content_queue and append them to qiubai.txt as JSON.
        """
        while True:
            items = self.content_queue.get()
            with open("qiubai.txt", 'a', encoding='utf-8') as f:
                for i in items:
                    json.dump(i, f, ensure_ascii=False, indent=2)
            self.content_queue.task_done()



    def run(self):
        while True:
            thread_list = []
            # 1. enqueue all the listing-page URLs
            thread_url = threading.Thread(target=self.get_total_url)
            thread_list.append(thread_url)
            # 2. send the requests from several worker threads
            for i in range(10):
                thread_parse = threading.Thread(target=self.parse_url)
                thread_list.append(thread_parse)
            # 3. extract the data
            thread_get_content = threading.Thread(target=self.get_content)
            thread_list.append(thread_get_content)
            # 4. save the results
            thread_save = threading.Thread(target=self.save_item)
            thread_list.append(thread_save)

            for t in thread_list:
                t.daemon = True  # make every worker a daemon thread, so it exits when the main thread exits
                t.start()        # this is what lets the program terminate instead of hanging at the end

            self.url_queue.join()  # block the main thread until all three queues have been fully processed
            self.html_queue.join()
            self.content_queue.join()
            time.sleep(600)  # wait ten minutes before the next full crawl



if __name__ == "__main__":
    qiubai = Qiushi()
    qiubai.run()
