Hello everyone,
This chapter is a key one, because today we are going to scrape images rather than just a web page or a JSON response.
So we don't need selenium this time; of course, anyone who is interested can still do the same scraping with selenium.
For convenience, plain requests is enough here, mainly because it is fast...
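At its core, the whole chapter boils down to one pattern: request the image URL and write the binary response body to a file. A minimal sketch of that pattern (the URL and filename below are placeholders, not the target site yet):

import requests

# Minimal sketch: download one image with requests and save the raw bytes.
# The URL and filename are placeholders for illustration only.
url = "http://example.com/sample.jpg"
resp = requests.get(url, timeout=10)
if resp.status_code == 200:
    with open("sample.jpg", "wb") as f:
        f.write(resp.content)  # resp.content is the binary body of the response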
Links to the previous chapters:
Python web crawler: getting started [1]
Python web crawler: the automated testing tool selenium [2]
The plan for this chapter:
1. Learn and get comfortable with regular expressions
2. Use regular expressions to analyze the site and pull out the specific information we want
If you have learned regular expressions but have never heard of this tool (RegexBuddy), that is a real pity.
There are probably other handy tools out there, but this one is already more than enough.
Download link:
Link: https://pan.baidu.com/s/1g8Zn-CKopsnCjA_o9jS0TQ
Extraction code: iq9s
In the spirit of keeping things easy for everyone (and for me), I will only walk through one matching example here; you can work through the other patterns in the code at your own pace.
(Screenshot: checking the pattern with RegexBuddy)
(Screenshot: a failing match)
"""
正则表达式
"""
import re
str = "[email protected]"
# 匹配所有邮箱
# f = re.match("^\w{4,20}@\w+\.com$",str)
# 匹配qq或163邮箱
# f = re.match("\w{4,20}@(qq|163)\.com",str)
# 匹配python \num方法
# str = "python"
# f = re.match("<([A-Za-z]+)><([A-Za-z]+)>.*\\2>\\1>",str)
# 匹配python 起别名方法
str = "python"
f = re.match("<(?P[A-Za-z]+)><(?P[A-Za-z]+)>.*(?P=name2)>(?P=name1)>" ,str)
# 2、match和search的区别
# str = "你好,123,现在在线人数为9999"
# f = re.match("\\d*",str)
# f = re.search(",\\d*",str)
# f = re.findall("\\d*",str)
# 3、贪婪与非贪婪 :加个?就行
# f = re.findall('src=".*"',str)
# f = re.findall('src=".*?"',str)
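# Illustration (assumed HTML-like test string, for reference only):
#   s = '<img src="a.jpg"><img src="b.jpg">'
#   re.findall('src=".*"', s)   ->  ['src="a.jpg"><img src="b.jpg"']   greedy: runs on to the last "
#   re.findall('src=".*?"', s)  ->  ['src="a.jpg"', 'src="b.jpg"']     non-greedy: stops at the first "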
# 4. Skip the double escaping (\\): prefix the pattern with r
# f = re.match(r"<([A-Za-z]+)><([A-Za-z]+)>.*</\2></\1>", str)

if f:
    print("Match succeeded")
    print(f.group())
    # for i in f:
    #     print(i)
else:
    print("Match failed")
The site we are going to scrape this time is http://pic.netbian.com/4kdongman/
Open one of the pictures to take a look:
But a single thumbnail on the list page is not enough for what we want,
so let's click through to the detail page:
Press F12 and inspect the page source;
the next step is to analyze the structure of the page and work out which regular expression will reliably capture the href of the a tag, or the src of the img tag.
This step is very important. Important. Important!!
The detail-page path we get after clicking through can then be requested and parsed in exactly the same way.
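As a minimal sketch of this idea, here is how re.findall pulls the href and the src/alt attributes out of a simplified snippet of what the list page roughly looks like (the markup below is an assumption for illustration; check the real structure with F12):

import re

# Simplified, assumed snippet of the list-page markup (illustration only)
html = '''
<li><a href="/tupian/24455.html" target="_blank"><img src="/uploads/a.jpg" alt="pic one"></a></li>
<li><a href="/tupian/24456.html" target="_blank"><img src="/uploads/b.jpg" alt="pic two"></a></li>
'''

# Capture the href of every <a> tag (non-greedy, so it stops at the closing quote)
hrefs = re.findall(r'<a href="(.*?)" target="_blank">', html)
# Capture the src and alt of every <img> tag in one pass
imgs = re.findall(r'<img src="(.*?)" alt="(.*?)">', html)

print(hrefs)  # ['/tupian/24455.html', '/tupian/24456.html']
print(imgs)   # [('/uploads/a.jpg', 'pic one'), ('/uploads/b.jpg', 'pic two')]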
Below you can test it yourself:
import requests
import re
from fake_useragent import UserAgent

ua = UserAgent(verify_ssl=False)
headers = {
    "Cookie": "__cfduid=d475437d729908631eff1e1d69f0314c81574259376; zkhanecookieclassrecord=%2C66%2C; Hm_lvt_526caf4e20c21f06a4e9209712d6a20e=1574259380,1574691901,1574734052; security_session_verify=ebb4b36dc44da23d2cdd02fa4650ae15; Hm_lpvt_526caf4e20c21f06a4e9209712d6a20e=1574735387"
    ,
    "User-Agent": ua.random
}
rep = requests.get("http://pic.netbian.com/4kdongman/index.html", headers=headers, verify=False)
rep.encoding = "gbk"  # the site is served as gbk, not utf-8
# The original pattern was stripped when the post was rendered; the line below is a
# reconstruction that grabs the href of every detail-page link on the list page.
# result = """<a href="(/tupian/.*?)" target="_blank">"""
# Run the regular expression over the page source
# contents = re.findall(result, rep.text)
print(rep.text)
# for content in contents:
#     print(content)
"""
爬取4k动漫图片
"""
import requests
import time
import os
import re
from fake_useragent import UserAgent
ua = UserAgent(verify_ssl=False)
headers = {
"Cookie": "__cfduid=d475437d729908631eff1e1d69f0314c81574259376; zkhanecookieclassrecord=%2C66%2C; Hm_lvt_526caf4e20c21f06a4e9209712d6a20e=1574259380,1574691901,1574734052; security_session_verify=645e98edf446fb2efa862d275906b0ba; Hm_lpvt_526caf4e20c21f06a4e9209712d6a20e=1574782670"
,
"User-Agent": ua.random
}
# 获取当前目录
root = os.getcwd()
# range此参数可以自己更改,第几页到第几页
for page in range(0, 125):
# 进入当前目录
os.chdir(root)
# 创建文件夹
os.mkdir(f"4k动漫的第{page+1}页")
# 改变当前文件目录
os.chdir(f"4k动漫的第{page+1}页")
if page+1 == 1:
url = f"http://pic.netbian.com/4kdongman/index.html"
else:
url = f"http://pic.netbian.com/4kdongman/index_{page + 1}.html"
response = requests.get(url,headers=headers,verify=False)
response.encoding="gbk"
if response.status_code == 200 :
result= """.*?"""
# 拿一个正则表达式去匹配
contents = re.findall(result,response.text)
# 去遍历所有的图片
for content in contents:
path = content
print(f"{path}正在进入html......")
response2 = requests.get("http://pic.netbian.com"+path, headers=headers,verify=False)
response2.encoding = "gbk"
time.sleep(1)
result2 = """"""
contents2 = re.findall(result2, response2.text)
for content2 in contents2:
path2 = content2[0]
name = content2[1]
response3 = requests.get("http://pic.netbian.com"+path2, headers=headers,verify=False)
# 保存到本地
with open(f"{name}.jpg","wb") as f:
f.write(response3.content)
print(f"{name} : {path2} 保存成功,等待1秒后继续爬取")
time.sleep(1)
print(f"第{page + 1}页抓取成功,,等待2秒后继续爬取")
time.sleep(2)
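A small design note: the script above changes the working directory with os.chdir for every page. An alternative sketch (the helper name and folder naming below are illustrative, not from the original post) is to build the target path explicitly with os.path.join, which leaves the working directory alone:

import os

save_root = os.getcwd()

def save_image(page, name, data):
    # Build the folder path explicitly instead of chdir-ing into it
    folder = os.path.join(save_root, f"4k-anime-page-{page}")
    os.makedirs(folder, exist_ok=True)  # create it only if it is missing
    with open(os.path.join(folder, f"{name}.jpg"), "wb") as f:
        f.write(data)  # data is the binary image content (response.content)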
The next example comes from: https://blog.csdn.net/qq_33958297/article/details/89388556
The site it scrapes: https://www.mzitu.com/
# -*- coding: utf-8 -*-
import requests
import os
from lxml import etree
from threading import Thread, BoundedSemaphore
from time import sleep

nMaxThread = 3  # how many threads are allowed to run at the same time
ThreadLock = BoundedSemaphore(nMaxThread)

gHeads = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
}

class Meizitu(Thread):
    def __init__(self, url, title):
        Thread.__init__(self)
        self.url = url  # this url is reused later as the Referer header
        self.title = title

    def run(self):
        try:
            PhotoUrl, Page = self.GetPhotoUrlAndPageNum()
            if PhotoUrl and Page > 0:
                self.SavePhoto(PhotoUrl, Page)
        finally:
            ThreadLock.release()  # free the thread slot acquired in main()

    def GetPhotoUrlAndPageNum(self):
        html = requests.get(self.url, headers=gHeads)
        if html.status_code == 200:
            xmlContent = etree.HTML(html.text)
            PhotoUrl = xmlContent.xpath("//div[@class='main-image']/p/a/img/@src")[0][:-6]  # strip the trailing "01.jpg" (exactly 6 characters)
            PageNum = xmlContent.xpath("//div[@class='pagenavi']/a[5]/span/text()")[0]
            return PhotoUrl, int(PageNum)
        else:
            return None, 0

    def SavePhoto(self, url, page):
        savePath = "./photo/%s" % self.title
        if not os.path.exists(savePath):
            os.makedirs(savePath)
        for i in range(page):
            heads = {
                "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
                "Referer": "%s/%d" % (self.url, i + 1),
                "Accept": "image/webp,image/apng,image/*,*/*;q=0.8"
            }
            j = 0
            while j < 5:
                print("Download : %s/%d.jpg" % (self.title, i + 1))
                html = requests.get("%s%02d.jpg" % (url, i + 1), headers=heads)
                if html.status_code == 200:
                    with open(savePath + "/%d.jpg" % (i + 1), "wb") as f:
                        f.write(html.content)
                    break
                elif html.status_code == 404:
                    j += 1
                    sleep(0.05)
                    continue
                else:
                    return None

def main():
    while True:
        try:
            nNum = int(input("How many pages do you want to download: "))
            if nNum > 0:
                break
        except ValueError:
            print("Please enter a number.")
            continue
    for i in range(nNum):
        url = "https://www.mzitu.com/xinggan/page/%d/" % (i + 1)
        html = requests.get(url, headers=gHeads)
        if html.status_code == 200:
            xmlContent = etree.HTML(html.content)
            hrefList = xmlContent.xpath("//ul[@id='pins']/li/a/@href")
            titleList = xmlContent.xpath("//ul[@id='pins']/li/a/img/@alt")
            for k in range(len(hrefList)):
                ThreadLock.acquire()  # block here until a thread slot is free
                t = Meizitu(hrefList[k], titleList[k])
                t.start()

if __name__ == '__main__':
    main()
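The part worth noting in this script is the BoundedSemaphore: the main thread acquires a slot before starting each worker, and every worker releases its slot in a finally block when it finishes, so at most nMaxThread downloads run at once. A stripped-down sketch of just that pattern (the names here are illustrative):

from threading import Thread, BoundedSemaphore

slots = BoundedSemaphore(3)  # at most 3 workers alive at once

class Worker(Thread):
    def __init__(self, n):
        Thread.__init__(self)
        self.n = n

    def run(self):
        try:
            print(f"working on job {self.n}")
        finally:
            slots.release()  # free the slot even if the job raises

for n in range(10):
    slots.acquire()  # blocks while 3 workers are still running
    Worker(n).start()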
If you already have a grounding in regular expressions, you can jump straight to the scraping part; if not, it is worth taking some time to learn them.
Either way, the RegexBuddy tool is fun to play around with.
If you think this chapter turned out well, how about... (~ ̄▽ ̄)~ (´▽`ʃ♡ƪ)