Parsing a web page with Python and downloading its images with multiple threads
import requests
from bs4 import BeautifulSoup
from downLoadFile import downLoadFile

'''
1. Fetch the page source with requests
2. Parse it into a DOM with BeautifulSoup
3. Collect all the <img> nodes
4. Download the images
'''

class imgFetch():
    def __init__(self, url, savePath):
        self.url = url
        self.savePath = savePath

    def fetch(self):
        try:
            r = requests.get(self.url, timeout=10)
            r.raise_for_status()                # raise on HTTP errors
            r.encoding = r.apparent_encoding    # decode with the page's own encoding to avoid garbled Chinese text
            soup = BeautifulSoup(r.text, 'html.parser')
            img_db = soup.find_all('img')
        except requests.RequestException:
            print('Network error!')
            return                              # nothing to download if the page could not be fetched
        print('=============== start downloading images ======================', len(img_db))
        self.download(img_db)

    def download(self, img_db):
        for img in img_db:
            downLoadFile(self.savePath, img.get('src')).start()   # one download thread per image


url = 'http://www.17sucai.com/category/2/48'
savePath = 'e:/test/'
imgFetch(url, savePath).fetch()
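
The main script starts one downLoadFile thread per <img> tag, which is fine for small pages but unbounded on image-heavy ones. Below is a minimal sketch of the same fetch-and-download flow with a capped number of worker threads, using concurrent.futures.ThreadPoolExecutor; the max_workers value and the download_img helper are assumptions added for illustration, not part of the original code.

import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor

def download_img(savePath, src):
    # hypothetical helper: same job as downLoadFile.run(), written as a plain function
    if not src or src[-4:] not in ('.jpg', '.png', '.gif'):
        return
    if src.startswith('//'):
        src = 'http:' + src                  # protocol-relative URL
    if not src.startswith('http'):
        return                               # skip page-relative paths
    r = requests.get(src, timeout=20)
    r.raise_for_status()
    with open(savePath + src.split('/')[-1], 'wb') as f:
        f.write(r.content)

def fetch_all(url, savePath, max_workers=8):
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    soup = BeautifulSoup(r.text, 'html.parser')
    with ThreadPoolExecutor(max_workers=max_workers) as pool:   # at most max_workers downloads in flight
        for img in soup.find_all('img'):
            pool.submit(download_img, savePath, img.get('src'))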
# downLoadFile.py
import requests
import uuid
import threading

'''
Download a single image:
1. Check that the image URL is usable
2. Normalise the image URL
3. Download and save the file
'''

class downLoadFile(threading.Thread):
    def __init__(self, savePath, imgUrl):
        super(downLoadFile, self).__init__()
        self.savePath = savePath
        self.imgUrl = imgUrl

    def run(self):
        imgUrl = self.__imgUrl__()
        if not imgUrl:
            return                       # skip unsupported or page-relative URLs
        # unique file name: save path + uuid + original extension
        saveImg = '%s%s%s' % (self.savePath, uuid.uuid1(), imgUrl[-4:])
        print('download url: %s, save path: %s' % (imgUrl, saveImg))
        try:
            r = requests.get(imgUrl, timeout=20)
            r.raise_for_status()
            with open(saveImg, "wb") as code:
                code.write(r.content)
        except (requests.RequestException, OSError):
            print('Bad file path, url: %s, save path: %s' % (imgUrl, saveImg))

    def __imgUrl__(self):
        if not self.imgUrl:              # <img> tags without a src attribute
            return None
        imgType = self.imgUrl[-4:]
        types = ['.jpg', '.png', '.gif']
        if imgType in types:
            if self.imgUrl[0:7] == 'http://':
                return self.imgUrl
            elif self.imgUrl[0:8] == 'https://':
                return self.imgUrl
            elif self.imgUrl[0:2] == '//':
                return 'http:' + self.imgUrl   # protocol-relative URL
        return None
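
Since downLoadFile subclasses threading.Thread, it can also be started on its own. A quick usage sketch follows; the image URL below is a made-up example, not taken from the original post.

from downLoadFile import downLoadFile

t = downLoadFile('e:/test/', 'https://www.example.com/banner.jpg')   # hypothetical image URL
t.start()
t.join()   # block until this one download finishes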