【Crawler demo: scraping Qiushibaike posts】XPath (multi-threaded version)

# Tested and working; meant to illustrate basic crawler concepts. Feedback and corrections are welcome.
from queue import Queue
import requests
from lxml import etree
from threading import Thread

"""爬取目标:http://www.qiushibaike.com/8hr/page/1
    用多线程实现
"""


class QiuShi:

    def __init__(self):

        # URL template and request headers (headers must be a dict for requests)
        self.base_url = 'http://www.qiushibaike.com/8hr/page/{}'
        self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'}

        # Queues that pass data between the pipeline stages
        self.url_queue = Queue()
        self.request_queue = Queue()
        self.html_queue = Queue()
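        # Data flow: get_url_list -> url_queue -> request_url -> request_queue
        #            -> get_content -> html_queue -> save_data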

    def get_url_list(self):
        """获取所有的url"""
        for i in range(1, 14):
            target_url = self.base_url.format(i)
            print(target_url)
            self.url_queue.put(target_url)

    def request_url(self):
        """向url发起请求"""
        while True:
            target_url = self.url_queue.get()
            response = requests.get(target_url, headers=self.headers)
            self.request_queue.put(response)
            self.url_queue.task_done()

    def get_content(self):
        """获取数据"""
        while True:
            html_text = self.request_queue.get().content.decode()
            html = etree.HTML(html_text)
            div_list = html.xpath('//div[@id="content-left"]/div')
            content_list = []
            for div in div_list:
                item = {}
                item['author'] = div.xpath('.//h2/text()')[0].strip()
                item['content'] = div.xpath('.//span/text()')[0].strip()
                print(item)
                content_list.append(item)
            self.html_queue.put(content_list)
            self.request_queue.task_done()

    def save_data(self):
        """保存入库"""
        while True:
            data_list = self.html_queue.get()
            # Open the file once per batch instead of once per item
            with open('qiushi.text', 'a+', encoding='utf-8') as f:
                for data in data_list:
                    f.write(str(data))
                    f.write('\r\n')
            self.html_queue.task_done()

    def main(self):
        """主程序逻辑"""
        # Collect the threads so they can be configured and started together
        thread_list = []
        # 1. Generate the URLs
        self.get_url_list()
        # 2. Fetch each URL
        t_request_url = Thread(target=self.request_url)
        thread_list.append(t_request_url)
        # 3. Extraction is the heaviest stage, so run it on four threads
        for _ in range(4):
            t_get_content = Thread(target=self.get_content)
            thread_list.append(t_get_content)
        # 4. Save the results
        t_save_data = Thread(target=self.save_data)
        thread_list.append(t_save_data)

        # Start every collected thread
        for s in thread_list:
            s.daemon = True  # daemon threads are killed when the main thread exits
            s.start()

        # Block until every queued task is marked done, then let the daemon threads die
        for q in [self.url_queue, self.request_queue, self.html_queue]:
            q.join()
        print("Main thread finished")


if __name__ == '__main__':
    qiushi = QiuShi()
    qiushi.main()
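
The clean shutdown above relies entirely on the Queue.task_done()/join() handshake: every worker is a daemon thread stuck in a `while True` loop, so the main thread exits only once each queue reports that every item it handed out has been marked done. Stripped of the crawling details, a minimal sketch of that same pattern (all names here, such as consume and work_queue, are illustrative) looks like this:

from queue import Queue
from threading import Thread

work_queue = Queue()

def consume():
    """Daemon worker: loops forever, acknowledging each item it takes."""
    while True:
        item = work_queue.get()      # blocks until an item is available
        print('processed', item)
        work_queue.task_done()       # exactly one task_done() per get()

# Four workers, mirroring the extraction stage above
for _ in range(4):
    t = Thread(target=consume)
    t.daemon = True                  # dies automatically with the main thread
    t.start()

for i in range(10):                  # producer: enqueue the work
    work_queue.put(i)

work_queue.join()                    # returns once every item is task_done()
print('all work finished')

The invariant worth remembering is one task_done() call per get(): if a worker raises an exception before calling task_done(), join() blocks forever, which is the first thing to harden if this crawler grows.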


Reposted from: https://www.cnblogs.com/888888CN/p/10070250.html
