其实原理都是一样的,只是每个网站的页面结构不一样,因此只需针对每个站点稍加修改。
当然如果能整合成一个程序统一去跑可能更好;当时急着用,用完后也没有继续优化,就先分享出来了。
百度图片爬虫
import re
import requests
from urllib import error
from bs4 import BeautifulSoup
import os
num = 0  # running count of images saved so far; incremented by dowmloadPicture()
numPicture = 0  # how many images the user asked to download (set in __main__)
file = ''  # name of the output folder entered by the user (set in __main__)
List = []  # per-page lists of image URLs accumulated by Find()
def Find(url):
    """Count the images Baidu returns for the paged search URL prefix `url`.

    Walks result pages 60 entries at a time (offsets 0..1999), appending
    each page's list of "objURL" links to the module-level `List`, and
    returns the total number of image URLs found.
    """
    global List
    total = 0
    offset = 0
    while offset < 2000:
        try:
            response = requests.get(url + str(offset), timeout=7)
        except requests.exceptions.RequestException:
            # Network hiccup on this page: skip it and try the next offset.
            # (The original caught BaseException, which also swallows
            # KeyboardInterrupt/SystemExit.)
            offset += 60
            continue
        # Baidu embeds the real image links as "objURL":"..." in the page.
        pic_url = re.findall('"objURL":"(.*?)",', response.text, re.S)
        total += len(pic_url)
        if len(pic_url) == 0:
            break  # an empty page means there are no further results
        List.append(pic_url)
        offset += 60
    return total
def recommend(url):
    """Scrape Baidu's related-search suggestions (the 'topRS' box) for `url`.

    Returns a list of suggestion strings; empty when the request fails or
    the page has no suggestion box.
    """
    suggestions = []
    try:
        html = requests.get(url)
    except requests.exceptions.RequestException:
        # requests never raises urllib's HTTPError (the original's except
        # clause), and returning None crashed the caller's
        # `for ... in Recommend` loop — return an empty list instead.
        return suggestions
    html.encoding = 'utf-8'
    bsObj = BeautifulSoup(html.text, 'html.parser')
    div = bsObj.find('div', id='topRS')
    if div is not None:
        for link in div.findAll('a'):
            if link is not None:
                suggestions.append(link.get_text())
    return suggestions
def dowmloadPicture(html, keyword):
    """Download every "objURL" image found in `html` into the global `file`
    folder, naming files with the global counter `num`; stops once the
    global `numPicture` quota is reached.

    (The misspelled name is kept for compatibility with existing callers.)
    """
    global num
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)
    for each in pic_url:
        if each is None:
            continue
        try:
            pic = requests.get(each, timeout=7)
        except requests.exceptions.RequestException:
            # This image could not be fetched; move on to the next one.
            continue
        # os.path.join replaces the original `file + r'\\' + ...`, which
        # inserted two literal backslashes and only worked on Windows.
        path = os.path.join(file, str(num) + '.jpg')
        with open(path, 'wb') as fp:  # `with` guarantees the handle closes
            fp.write(pic.content)
        num += 1
        if num >= numPicture:
            return
if __name__ == '__main__':  # script entry point
    word = input("请输入搜索关键词(可以是人名,地名等): ")
    url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn='
    tot = Find(url)
    Recommend = recommend(url)  # related-search suggestions for later
    print('经过检测%s类图片共有%d张' % (word, tot))
    numPicture = int(input('请输入想要下载的图片数量 '))
    file = input('请建立一个存储图片的文件夹,输入文件夹名称即可')
    # Keep prompting until the name is unused, then create the folder.
    # (The original re-checked only once and could still crash with
    # FileExistsError on a second duplicate name.)
    while os.path.exists(file):
        print('该文件已存在,请重新输入')
        file = input('请建立一个存储图片的文件夹,输入文件夹名称即可')
    os.mkdir(file)
    t = 0
    tmp = url
    while t < numPicture:
        try:
            url = tmp + str(t)
            result = requests.get(url, timeout=10)
            print(url)
        except requests.exceptions.RequestException:
            # requests raises its own exceptions, not urllib's HTTPError.
            print('网络错误,请调整网络后重试')
        else:
            dowmloadPicture(result.text, word)
        t = t + 60  # Baidu pages advance 60 results at a time
    print('当前搜索结束,感谢使用')
    # `Recommend or []` guards against a failed suggestion fetch; the loop
    # variable must not be named `re` (the original shadowed the re module).
    for suggestion in Recommend or []:
        print(suggestion, end=' ')
必应(Bing)图片爬虫
import requests
import urllib
import os, re
import itertools
from PIL import Image
import io
import logging
import html
# Restores the ':', '.' and '/' characters that the objURL obfuscation
# replaces with fixed marker strings.
sign_table = {
    '_z2C$q': ':',
    '_z&e3B': '.',
    'AzdH3F': '/'
}
# Single-character substitution alphabet used by the same obfuscation.
char_table = {
    'w': 'a', 'k': 'b', 'v': 'c', '1': 'd', 'j': 'e', 'u': 'f',
    '2': 'g', 'i': 'h', 't': 'i', '3': 'j', 'h': 'k', 's': 'l',
    '4': 'm', 'g': 'n', '5': 'o', 'r': 'p', 'q': 'q', '6': 'r',
    'f': 's', 'p': 't', '7': 'u', 'e': 'v', 'o': 'w', '8': '1',
    'd': '2', 'n': '3', '9': '4', 'c': '5', 'm': '6', '0': '7',
    'b': '8', 'l': '9', 'a': '0'
}
# Python 3's str.translate expects {ord(src): ord(dst)} mappings, so
# convert the character table once at import time.
char_table = {ord(src): ord(dst) for src, dst in char_table.items()}


def decode(url):
    """Undo the objURL obfuscation on `url`.

    First the multi-character markers are replaced with the punctuation
    they stand for, then the per-character substitution table is applied
    in a single pass; characters outside both tables pass through as-is.
    """
    for marker, plain in sign_table.items():
        url = url.replace(marker, plain)
    return url.translate(char_table)
# 生成网址列表
def buildUrls(word):
    """Return an endless generator of Bing image-search result URLs for
    `word`, advancing the `first` offset by one 35-item page each step."""
    quoted = urllib.parse.quote(word)
    template = r"https://cn.bing.com/images/async?q={word}&first={pn}&count=35&relp=35&scenario=ImageBasicHover&datsrc=N_I&layout=RowBased&mmasync=1"
    return (template.format(word=quoted, pn=offset)
            for offset in itertools.count(start=0, step=35))
# 解析JSON获取图片URL
def resolveImgUrl(html):
    """Return the host+path part of every "murl" image link found in
    `html` (the leading 'http://' is excluded by the capture group)."""
    pattern = r'"murl":"http://(.*?)"'
    return re.findall(pattern, html)
# 下载图片
def downImg(imgUrl, dirpath, imgName):
    """Fetch http://`imgUrl` and save it as `imgName` inside `dirpath`.

    Returns True on success, False when the request raises or the server
    answers with a 4xx status.
    """
    filename = os.path.join(dirpath, imgName)
    try:
        fullUrl = 'http://' + imgUrl
        res = requests.get(fullUrl, timeout=15)
        if str(res.status_code)[0] == "4":
            print(str(res.status_code), ":", imgUrl)
            return False
    except Exception as e:
        # logging.debug takes a %-style format string; the original passed
        # imgUrl as a stray positional argument with no placeholder, which
        # logging reports as a formatting error when DEBUG is enabled.
        logging.debug("抛出异常: %s", imgUrl)
        logging.debug(e)
        return False
    with open(filename, "wb") as f:
        f.write(res.content)
    return True
if __name__ == '__main__':  # script entry point
    print("欢迎使用bing图片下载脚本!\n目前仅支持单个关键词。")
    print("下载结果保存在脚本目录下的picture文件夹中。")
    print("=" * 50)
    word = 'street people'
    # os.path.join is portable; the original hard-coded a Windows '\\'
    # separator, producing a bogus directory name on other platforms.
    dirpath = os.path.join(os.getcwd(), 'pictures')
    if not os.path.isdir(dirpath):
        os.mkdir(dirpath)
    index = 0
    for pageUrl in buildUrls(word):  # renamed: the original reused `url`
        print("正在请求:", pageUrl)
        try:
            page = requests.get(pageUrl, timeout=10)
        except requests.exceptions.RequestException:
            # One failed page request should not abort the whole run.
            continue
        imgUrls = resolveImgUrl(html.unescape(page.text))
        if len(imgUrls) == 0:  # an empty page means no further results
            break
        for imgUrl in imgUrls:
            if downImg(imgUrl, dirpath, str(index) + ".jpg"):
                index += 1
                print("已下载 %s 张" % index)
            if index > 12000:
                # SystemExit is the explicit spelling of exit(0), which is
                # provided by the site module and not guaranteed in scripts.
                raise SystemExit(0)