Multithreaded crawler

Producer/consumer design: crawl threads take page numbers from a page queue, download each page, and put the HTML into a data queue; parse threads take HTML from the data queue, extract the data, and write it to a shared file protected by a lock.

import threading
import time
import requests
from bs4 import BeautifulSoup
import json
from queue import Queue, Empty

class CrawlThread(threading.Thread):
    def __init__(self, name, page_queue, data_queue):
        super().__init__()
        self.name = name
        self.page_queue = page_queue
        self.data_queue = data_queue
        self.url = 'http://www.fanjian.net/jianwen-{}'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        }

    def run(self):
        print('%s started...' % self.name)
        '''
        1. Take a page number from the page queue
        2. Join the base url and the page number to build the page url
        3. Send the request and get the response
        4. Put the response body into the data queue
        '''
        while True:
            try:
                # All page numbers are queued up front, so an empty queue means we are done
                page = self.page_queue.get(block=False)
            except Empty:
                break
            url = self.url.format(page)
            content = requests.get(url=url, headers=self.headers, timeout=10).text
            self.data_queue.put(content)
        print('%s finished...' % self.name)

class ParseThread(threading.Thread):
    def __init__(self, name, data_queue, fp, lock):
        super().__init__()
        self.name = name
        self.data_queue = data_queue
        self.fp = fp
        self.lock = lock

    def run(self):
        print('%s started...' % self.name)
        '''
        1. Take a page of html from the data queue
        2. Parse it
        3. Write the extracted data to the file
        '''
        while True:
            data = self.data_queue.get()
            if data is None:
                # Sentinel pushed by main() once all crawl threads are done
                break
            self.parse(data)
        print('%s finished...' % self.name)

    def parse(self, data):
        soup = BeautifulSoup(data, 'lxml')
        # The original listing left the item empty; the field below is a
        # placeholder, adapt the selectors to the real page structure
        item = {
            'title': soup.title.string.strip() if soup.title and soup.title.string else '',
        }
        # The file object is shared by all parse threads, so guard writes with the lock
        self.lock.acquire()
        self.fp.write(json.dumps(item, ensure_ascii=False) + '\n')
        self.lock.release()

def create_queue():
    page_queue = Queue()
    data_queue = Queue()
    # Add the page numbers to be crawled to the page queue
    for x in range(1, 11):
        page_queue.put(x)
    return page_queue, data_queue

def main():
    # Create the lock and open the output file
    fp = open('fanjian.txt', 'w', encoding='utf8')
    lock = threading.Lock()
    # Create the two queues
    page_queue, data_queue = create_queue()
    # Two empty lists to keep references to the created threads
    t_crawl_list = []
    t_parser_list = []
    # Create and start the crawl threads
    crawl_names_list = ['Crawl thread 1', 'Crawl thread 2', 'Crawl thread 3']
    for crawl_name in crawl_names_list:
        t_crawl = CrawlThread(crawl_name, page_queue, data_queue)
        t_crawl_list.append(t_crawl)
        t_crawl.start()

    # Create and start the parse threads
    parser_names_list = ['Parse thread 1', 'Parse thread 2', 'Parse thread 3']
    for parser_name in parser_names_list:
        t_parser = ParseThread(parser_name, data_queue, fp, lock)
        t_parser_list.append(t_parser)
        t_parser.start()

    # Let the main thread wait for every crawl thread to finish
    for t_crawl in t_crawl_list:
        t_crawl.join()
    # Crawling is done: push one sentinel per parse thread so each one knows to exit
    for _ in t_parser_list:
        data_queue.put(None)
    for t_parser in t_parser_list:
        t_parser.join()
    fp.close()
    print('Main thread: all worker threads have finished')

if __name__ == '__main__':
    main()
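
For comparison, the same download-and-parse pipeline can also be written with concurrent.futures.ThreadPoolExecutor from the standard library, which manages thread startup and shutdown itself, so no sentinel values or explicit join calls are needed. This is a minimal sketch rather than a drop-in replacement for the class-based version above: the URL pattern and headers are reused from the listing, fetch_and_parse is a hypothetical helper, and the 'title' field is the same placeholder extraction used in parse().

from concurrent.futures import ThreadPoolExecutor
import json

import requests
from bs4 import BeautifulSoup

URL = 'http://www.fanjian.net/jianwen-{}'
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}

def fetch_and_parse(page):
    # Download one page and extract a placeholder item from it
    html = requests.get(URL.format(page), headers=HEADERS, timeout=10).text
    soup = BeautifulSoup(html, 'lxml')
    title = soup.title.string.strip() if soup.title and soup.title.string else ''
    return {'page': page, 'title': title}

if __name__ == '__main__':
    with ThreadPoolExecutor(max_workers=3) as pool, \
            open('fanjian.txt', 'w', encoding='utf8') as fp:
        # pool.map runs fetch_and_parse concurrently and yields results in page order;
        # only the main thread writes to the file, so no lock is needed
        for item in pool.map(fetch_and_parse, range(1, 11)):
            fp.write(json.dumps(item, ensure_ascii=False) + '\n')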
