Multithreaded URL collection with Python 3

# coding:utf-8

import requests
from lxml import etree
from queue import Queue
from urllib import parse
import threading
import re

class Producer(threading.Thread):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    }

    def __init__(self, page_queue, txt_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.txt_queue = txt_queue

    def run(self):
        # Work until every result-page URL has been taken off the queue
        while not self.page_queue.empty():
            url = self.page_queue.get()
            self.url_search(url)

    def url_search(self, url):
        resp = requests.get(url, headers=self.headers)
        text = resp.content.decode('utf-8')
        html = etree.HTML(text)
        # The display URL of each Baidu result sits in the first <a> inside div.f13
        aElements = html.xpath("//div[@class='f13']/a[1]/text()")
        for aElement in aElements:
            self.txt_queue.put(aElement)


class Consumer(threading.Thread):
    def __init__(self, page_queue, txt_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.txt_queue = txt_queue

    def run(self):
        with open('urls.txt', 'w') as f:
            while True:
                if self.txt_queue.empty() and self.page_queue.empty():
                    break
                item = self.txt_queue.get()
                # Keep everything up to the first cn/com/org plus the trailing slash;
                # a pattern like .+?(/|)$ would also work here.
                match = re.search(r".*?(cn|com|org)/", item)
                if match:
                    f.write(match.group() + '\n')
        self.qc()


    def qc(self):
        # De-duplicate the collected URLs and write the cleaned list to urlsok.txt
        with open('urls.txt') as f:
            quto = list(set(f))
        with open('urlsok.txt', 'w') as f:
            for i in quto:
                f.write(i)
        print("clear_ok")

def main():

    page_queue = Queue(1000)
    txt_queue = Queue(10000)
    keyword = input("keyword: ")
    page = int(input("page: "))
    # Baidu paginates with pn=0, 10, 20, ... (ten results per page)
    for i in range(0, page*10, 10):
        url = "https://www.baidu.com/s?wd=" + parse.quote(keyword) + "&pn=" + str(i)
        page_queue.put(url)

    # Start the producer thread(s); raise the range() to crawl pages in parallel
    for p in range(1):
        t1 = Producer(page_queue, txt_queue)
        t1.start()

    # Start the consumer thread(s) that filter and write the collected URLs
    for s in range(1):
        t2 = Consumer(page_queue, txt_queue)
        t2.start()


if __name__ == '__main__':
    main()
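
Running the script is interactive: it asks for a search keyword and the number of Baidu result pages to fetch (ten results per page), writes every matched URL prefix to urls.txt, and finally stores a de-duplicated copy in urlsok.txt.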

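One caveat about the shutdown logic: the consumer stops as soon as both queues happen to be empty, which can occur right after a producer has taken the last page URL off page_queue but before it has pushed any results into txt_queue, so some URLs may be dropped. A minimal sketch of a more robust alternative (not the original code: the None sentinel, the thread count of 3 and the join() calls are my own additions, and it assumes Consumer.run() is adapted to stop when it receives None from txt_queue):

# In main(), keep the thread handles, join the producers, then signal the consumer
producers = [Producer(page_queue, txt_queue) for _ in range(3)]
for t in producers:
    t.start()

consumer = Consumer(page_queue, txt_queue)
consumer.start()

for t in producers:
    t.join()             # every result page has been fetched and parsed
txt_queue.put(None)      # sentinel: no more URLs will arrive
consumer.join()

Joining the producers first guarantees that every extracted URL is already in txt_queue before the stop signal is sent.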