单线程爬虫已经写过了,这篇文章就对上一篇爬虫进行改造,改成多线程的,上期文章链接:https://blog.csdn.net,对比单线程,相同的任务量多线程可以从107秒降到8秒左右(主要指获取图片链接并写到文件中)
对于多线程爬虫,常见有2种写法,一种是继承threading.Thread类,还有一种是直接使用threading.Thread创建线程对象,至于线程池什么的,我还没了解过,本篇文章是直接使用threading.Thread。
一般来说:
不管是多线程,多进程还是分布式爬虫,核心的东西就是任务分配和任务同步。
这里以获取图片的URL并保存到文件中为例:(保存图片的可看代码注释)
定义队列:(每个队列存放的对象都是一个list,用于分类别,方括号里的就是参数)
定义流程:(函数)
这里有一些参考资料:
import os
import sys
import time
import queue
import requests
import threading
from lxml import etree
class Reptile:
    """Crawler: collect wallpaper image URLs per topic and append them to text files.

    The crawl is organized as a pipeline of queues so each stage can be served
    by several worker threads:

        start_url_q -> start_content_q -> next_image_url_q
                    -> next_image_content_q -> image_info_q

    Workers loop forever on ``Queue.get``; ``run`` starts them as daemon
    threads and waits for completion via ``Queue.join``.
    """

    def __init__(self):
        # Topic start pages: each entry is [topic name, start URL].
        self.base_url = [["明星壁纸", "http://www.win4000.com/wallpaper_205_0_10_1.html"],
                         ["美食壁纸", "http://www.win4000.com/wallpaper_2361_0_10_1.html"],
                         ["卡通动漫", "http://www.win4000.com/wallpaper_192_0_10_1.html"],
                         ["游戏壁纸", "http://www.win4000.com/wallpaper_191_0_10_1.html"]]
        self.headers = {  # custom request header (pretend to be a desktop browser)
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}
        self.image_url_dir = os.path.join(sys.path[0], "image_url")  # directory for the URL .txt files
        self.image_dir = os.path.join(sys.path[0], "image")  # directory for downloaded images
        self.q_lock = threading.Lock()  # serializes the file appends in save_image_info
        # Pipeline queues; every queued item is a list (see each stage's comment).
        self.start_url_q = queue.Queue()           # ["topic", start URL]
        self.start_content_q = queue.Queue()       # ["topic", start page HTML]
        self.next_image_url_q = queue.Queue()      # ["topic", gallery page URL]
        self.next_image_content_q = queue.Queue()  # ["topic", gallery page HTML]
        self.image_info_q = queue.Queue()          # ["topic", gallery title, image URL]

    def init_base_url(self):
        """Seed the pipeline with the topic start pages and create output dirs."""
        for base in self.base_url:
            self.start_url_q.put(base)
            # exist_ok=True: a re-run must not crash on already-existing directories.
            os.makedirs(os.path.join(self.image_url_dir, base[0]), exist_ok=True)
            # os.makedirs(os.path.join(self.image_dir, base[0]), exist_ok=True)  # enable when saving images

    def send_start_url(self):
        """Worker: fetch each topic start page and queue its HTML."""
        while True:  # each worker thread keeps pulling tasks forever (daemon thread)
            start_url = self.start_url_q.get()  # ["topic", start URL]
            resp = requests.get(url=start_url[1], headers=self.headers)
            self.start_content_q.put([start_url[0], resp.text])
            self.start_url_q.task_done()

    def get_start_content(self):
        """Worker: parse a topic start page and queue every gallery link on it."""
        while True:
            start_content = self.start_content_q.get()  # ["topic", start page HTML]
            content = etree.HTML(start_content[1])
            # Gallery jump links on the topic page (site-specific XPath).
            next_urls = content.xpath('./body/div[4]/div/div[3]/div/div/div/div/div/ul/li/a/@href')
            for url in next_urls:
                self.next_image_url_q.put([start_content[0], url])
            self.start_content_q.task_done()

    def send_next_image_url(self):
        """Worker: fetch a gallery page and queue its HTML."""
        while True:
            next_image_url = self.next_image_url_q.get()  # ["topic", gallery page URL]
            resp = requests.get(url=next_image_url[1], headers=self.headers)
            self.next_image_content_q.put([next_image_url[0], resp.text])
            self.next_image_url_q.task_done()

    def get_next_image_content(self):
        """Worker: parse a gallery page — queue its image URL, and feed the
        next-image page of the same gallery back into the pipeline."""
        while True:
            next_image_content = self.next_image_content_q.get()  # ["topic", gallery page HTML]
            content = etree.HTML(next_image_content[1])
            title = content.xpath('./body/div[4]/div/div[2]/div/div[2]/div/div[@class="pic-meinv"]/a/img/@title')[0]  # gallery title
            next_url = content.xpath('./body/div[4]/div/div[2]/div/div[2]/div/div[@class="pic-meinv"]/a/@href')[0]  # page of the next image
            image_url = content.xpath('./body/div[4]/div/div[2]/div/div[2]/div/div[@class="pic-meinv"]/a/img/@src')[0]  # image resource URL
            # An '_' seven chars from the end marks a non-final page of the gallery
            # (site-specific URL convention — TODO confirm). Length guard prevents
            # an IndexError on unexpectedly short URLs.
            if len(next_url) >= 7 and next_url[-7] == '_':
                self.next_image_url_q.put([next_image_content[0], next_url])
            self.image_info_q.put([next_image_content[0], title, image_url])
            self.next_image_content_q.task_done()

    def save_image_info(self):
        """Worker: append one "topic,title,url" line per image to its gallery's file."""
        while True:
            image_info = self.image_info_q.get()  # ["topic", gallery title, image URL]
            file_name = os.path.join(
                self.image_url_dir, image_info[0], image_info[1] + ".txt")
            # Lock so concurrent workers never interleave writes to the same file;
            # 'with' guarantees release even if the write raises.
            with self.q_lock:
                with open(file_name, "a", encoding="utf-8") as output:
                    output.write(
                        image_info[0] + "," + image_info[1] + "," + image_info[2] + "\n")
            self.image_info_q.task_done()

    def run(self):
        """Start all worker threads and block until every queue is drained."""
        thread_list = []
        # One thread seeds the start URLs.
        thread_list.append(threading.Thread(target=self.init_base_url))
        # One thread fetches the start pages.
        thread_list.append(threading.Thread(target=self.send_start_url))
        # 3 parsers for start pages.
        for _ in range(3):
            thread_list.append(threading.Thread(target=self.get_start_content))
        # 10 fetchers for gallery pages.
        for _ in range(10):
            thread_list.append(threading.Thread(
                target=self.send_next_image_url))
        # 10 parsers for gallery pages.
        for _ in range(10):
            thread_list.append(threading.Thread(
                target=self.get_next_image_content))
        # 10 writers for the URL files.
        for _ in range(10):
            thread_list.append(threading.Thread(target=self.save_image_info))
        for th in thread_list:
            # daemon=True: workers die with the main thread. The 'daemon'
            # attribute replaces setDaemon(), deprecated since Python 3.10.
            th.daemon = True
            th.start()
        # Wait until every stage's queue has been fully processed.
        for q in [self.start_url_q, self.start_content_q, self.next_image_url_q,
                  self.next_image_content_q, self.image_info_q]:
            q.join()
class DownloadImage():
    """Read the URL files produced by ``Reptile`` and download every image.

    Pipeline: address_q (URL-file paths) -> image_url_q ("topic,title,url" rows)
            -> image_content_q (HTTP responses) -> image files on disk.
    """

    def __init__(self):
        self.image_url_dir = os.path.join(sys.path[0], "image_url")  # where the URL .txt files live
        self.image_dir = os.path.join(sys.path[0], "image")  # where images are saved
        self.image_num = 0          # total image URLs discovered so far
        self.image_current_num = 0  # images saved so far
        self.q_lock = threading.Lock()  # guards the two counters and file I/O
        self.address_q = queue.Queue()        # paths of the URL .txt files
        self.image_url_q = queue.Queue()      # "topic,title,url" rows
        self.image_content_q = queue.Queue()  # [topic, title, file stem, response]
        self.headers = {  # custom request header
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}

    def init_address(self):
        """Queue every URL-file path and create the matching image directories."""
        for dire in os.listdir(self.image_url_dir):
            files = os.listdir(os.path.join(self.image_url_dir, dire))
            for file in files:
                self.address_q.put(os.path.join(self.image_url_dir, dire, file))
                # Target directory for this gallery: drop the ".txt" suffix.
                image_dire = os.path.join(self.image_dir, dire, file)[:-4]
                # exist_ok=True replaces the exists()+makedirs pair: atomic, no
                # race between the check and the creation.
                os.makedirs(image_dire, exist_ok=True)

    def get_image_url(self):
        """Worker: read one URL file, queue each row, and count the total."""
        while True:
            address = self.address_q.get()
            with self.q_lock:  # protects image_num; 'with' releases on error
                # 'fh' instead of 'input' — don't shadow the builtin.
                with open(address, "r", encoding="utf-8") as fh:
                    for row in fh.readlines():
                        self.image_url_q.put(row[:-1])  # strip the trailing "\n"
                        self.image_num += 1
            self.address_q.task_done()

    def send_image_url(self):
        """Worker: download one image and queue its response for saving."""
        while True:
            row = self.image_url_q.get().split(",")  # ['美食壁纸', '唯美甜食冰淇淋图片桌面壁纸', 'http://pic1.win4000.com/wallpaper/2020-02-19/5e4cee929b3d6.jpg']
            # resp = requests.get(url=row[2], headers=self.headers, stream=True)
            resp = requests.get(url=row[2], headers=self.headers)
            # row[2][-16:-5] takes the 11-char file stem from the URL tail —
            # assumes the host's fixed ".../<11-char id>.jpg" layout; TODO confirm.
            self.image_content_q.put([row[0], row[1], row[2][-16:-5], resp])
            self.image_url_q.task_done()

    def get_image_content(self):
        """Worker: write one downloaded image to disk and report progress."""
        while True:
            content = self.image_content_q.get()
            image_name = os.path.join(self.image_dir, content[0], content[1], content[2] + ".jpg")
            with self.q_lock:  # protects the counters and keeps progress lines whole
                # "wb", not "wb+": the file is only written, never read back.
                with open(image_name, "wb") as output:
                    output.write(content[3].content)
                self.image_current_num += 1
                print(f"共计{self.image_num},正在下载第{self.image_current_num}张,完成度:{(self.image_current_num / self.image_num) * 100}%")
            self.image_content_q.task_done()

    def run(self):
        """Start the download workers and block until every queue is drained."""
        thread_list = []
        self.init_address()
        # 3 readers of the URL files.
        for _ in range(3):
            thread_list.append(threading.Thread(target=self.get_image_url))
        # 10 HTTP downloaders.
        for _ in range(10):
            thread_list.append(threading.Thread(target=self.send_image_url))
        # 10 disk writers.
        for _ in range(10):
            thread_list.append(threading.Thread(target=self.get_image_content))
        for t in thread_list:
            # 'daemon' attribute replaces setDaemon(), deprecated since 3.10.
            t.daemon = True
            t.start()
        # Wait for all three stages to finish.
        for q in [self.address_q, self.image_url_q, self.image_content_q]:
            q.join()
if __name__ == '__main__':
    # Phase 1: crawl the image URLs into text files.
    t = time.time()
    reptile = Reptile()
    reptile.run()
    print(f"get_url used time:{time.time() - t}")
    # get_url used time:7.833039045333862
    # Phase 2: download the images listed in those files.
    t = time.time()
    downloadImage = DownloadImage()
    downloadImage.run()
    # Fixed copy-paste bug: this line previously printed "get_url used time"
    # even though it times the download phase.
    print(f"download used time:{time.time() - t}")