Installation command:
pip install selenium -i https://pypi.tuna.tsinghua.edu.cn/simple
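The crawler below also imports BeautifulSoup and requests, so install them the same way if they are not already present; a one-line import check then confirms that everything is available (the exact version printed depends on what pip installed):
pip install beautifulsoup4 requests -i https://pypi.tuna.tsinghua.edu.cn/simple
python -c "import selenium, bs4, requests; print(selenium.__version__)"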
Download URL: https://chromedriver.storage.googleapis.com/index.html
Choose the version that matches your installed Chrome browser:
On Windows, download the win32 build, unzip it, and copy the exe file into the C:\Windows\System32 directory.
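Once chromedriver.exe sits in a directory on the PATH (System32 is on the PATH by default), a quick sanity check is to launch a browser and load a page; a version mismatch between the driver and the installed Chrome will show up here as an error. This is only a minimal check, not part of the crawler itself:
from selenium import webdriver

driver = webdriver.Chrome()            # locates chromedriver.exe via the PATH
driver.get('https://image.baidu.com')
print(driver.title)                    # prints the page title if the setup works
driver.quit()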
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
from time import sleep
import os
import random

driver = webdriver.Chrome()
# driver.implicitly_wait(30)  # optional implicit wait

# Baidu image search keywords; read them from a file or list them inline
# all = open('list.txt', encoding='UTF-8').readlines()
all = ['车']  # example keyword: '车' ("car")
for a in all:
    a = a.strip()
    os.makedirs(a, exist_ok=True)  # one folder per keyword
    url = 'https://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word={}'.format(a)
    driver.get(url)  # open the search page
    sleep(2)  # give the page time to finish loading

    for i in range(10):  # larger values load more results; adjust to your needs
        # scroll the page down by 1000 pixels to trigger lazy loading
        driver.execute_script("window.scrollBy(0,1000)")
        sleep(1)

    soup = BeautifulSoup(driver.page_source, 'html.parser')
    # drill down with find() step by step to reach the image containers
    body = soup.find('div', attrs={'id': 'wrapper'})
    body = body.find('div', attrs={'id': 'imgContainer'})
    body = body.find('div', attrs={'id': 'imgid'})

    # Scraping many images will very likely get your IP blocked, so route the
    # downloads through proxies. Replace these placeholder addresses with working
    # proxies of your own, or set proxies = None to connect directly.
    # (A more defensive download helper is sketched after this script.)
    proxy_pool = [
        "http://198.118.26.66",
        "http://196.128.27.67",
        "http://195.138.28.68",
        "http://194.148.29.69",
    ]
    proxies = {"http": random.choice(proxy_pool)}

    i = 0
    count = 0
    for txt in body.find_all('div', attrs={'class': 'imgpage'}):
        txt = txt.find('ul', attrs={'class': 'imglist clearfix pageNum' + str(i)})
        i += 1
        for img in txt.find_all('li', class_='imgitem'):
            if "rsItem imgitem" not in str(img):  # skip 'rsItem' entries, which are not plain image results
                img = img.find('img')
                img = img.attrs['data-imgurl']
                image = requests.get(img, proxies=proxies)
                print(img)
                path = os.path.join(a, str(count) + '.jpg')
                fp = open(path, 'wb')
                fp.write(image.content)
                fp.close()
                count += 1
    print('Scraped', i, 'pages and', count, 'images in total')
driver.quit()
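Free or hand-picked proxies time out and drop connections frequently, so in practice it helps to wrap each download in a timeout and a try/except so that one bad URL does not abort the whole run. The helper below is only an illustrative sketch: the name save_image and the 10-second timeout are choices made for this example, not part of the original script.
import requests

def save_image(img_url, path, proxies=None, timeout=10):
    """Download one image and write it to disk; return True on success."""
    try:
        resp = requests.get(img_url, proxies=proxies, timeout=timeout)
        resp.raise_for_status()  # treat 4xx/5xx responses as failures
    except requests.RequestException as exc:
        print('download failed:', img_url, exc)
        return False
    with open(path, 'wb') as fp:
        fp.write(resp.content)
    return True

Inside the inner loop, the requests.get call and the open/write/close lines could then be replaced by a single save_image(img, path, proxies) call.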