[Hands-on Project] Scraping Qiushibaike jokes with urllib and multiple threads

import threading
import urllib.request
import re
import ssl

#Disable SSL certificate verification for HTTPS requests
ssl._create_default_https_context = ssl._create_unverified_context
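#Note: this swaps in an unverified context for the whole process, so HTTPS
#certificate checks are skipped for every request made by this script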

#Add a User-Agent header to the opener to masquerade as a browser
header = ("User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:57.0) Gecko/20100101 Firefox/57.0")
opener = urllib.request.build_opener()
opener.addheaders = [header]

#Install this opener as the global default
urllib.request.install_opener(opener)
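#Note: once install_opener() has run, every urllib.request.urlopen() call in
#this process, including those made inside the two threads below, goes through
#this opener and therefore carries the custom User-Agent header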

#Create thread 1
class One(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        
    def run(self):
        #Crawl the odd-numbered pages (1, 3, ..., 19)
        for i in range(1,20,2):
            urldata = urllib.request.urlopen("https://www.qiushibaike.com/8hr/page/" + str(i)).read().decode("utf-8")

            #Regex for the joke text; the HTML tags inside the original pattern were
            #lost when the page was rendered, so this assumes the usual
            #<div class="content">...<span>...</span> markup of the listing page
            pat = '<div class="content">.*?<span>(.*?)</span>.*?</div>'
            data = re.compile(pat, re.S).findall(urldata)
            print(len(data))
            for j in range(0, len(data)):
                print("Page " + str(i) + ", joke " + str(j) + ":\n")
                print(data[j])

#Create thread 2
class Two(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        #Crawl the even-numbered pages (2, 4, ..., 18); the original started at 0,
        #but the listing pages begin at 1, so start at 2 here
        for i in range(2, 20, 2):
            urldata = urllib.request.urlopen("https://www.qiushibaike.com/8hr/page/" + str(i)).read().decode("utf-8")

            pat = '<div class="content">.*?<span>(.*?)</span>.*?</div>'
            data = re.compile(pat, re.S).findall(urldata)
            for j in range(0, len(data)):
                print("Page " + str(i) + ", joke " + str(j) + ":\n")
                print(data[j])

#Start thread 1 and thread 2
th1 = One()
th1.start()
th2 = Two()
th2.start()
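
The two thread classes above differ only in which pages they fetch. As a variation, the same crawl can be written with a single Thread subclass that takes its starting page as a parameter, plus join() calls so the main program waits for both threads to finish before exiting. The sketch below is not part of the original script; it reuses the imports, the installed opener, and the assumed regex from above, and the class name PageCrawler is introduced here purely for illustration.

#A minimal refactor sketch: one thread class, parametrized by its start page
pat = '<div class="content">.*?<span>(.*?)</span>.*?</div>'   #same assumed pattern as above

class PageCrawler(threading.Thread):
    def __init__(self, start_page):
        threading.Thread.__init__(self)
        self.start_page = start_page

    def run(self):
        #Pages start_page, start_page + 2, ... up to 19
        for i in range(self.start_page, 20, 2):
            urldata = urllib.request.urlopen("https://www.qiushibaike.com/8hr/page/" + str(i)).read().decode("utf-8")
            data = re.compile(pat, re.S).findall(urldata)
            for j in range(0, len(data)):
                print("Page " + str(i) + ", joke " + str(j) + ":\n")
                print(data[j])

threads = [PageCrawler(1), PageCrawler(2)]   #odd pages and even pages
for t in threads:
    t.start()
for t in threads:
    t.join()   #wait for both crawls to finish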
