Disclaimer: this crawler is for learning purposes only. Please make absolutely sure you do not harm the target website or its server, and never write an infinite loop.
-
Approach: ancient towns — ancient town list (loop to collect each town's detail href) — each town's detail page (collect the src of every img).
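As a rough sketch, those levels translate into nested loops like this (parse_article_links, parse_image_srcs and save_image are hypothetical helpers standing in for the real functions further down):

def crawl_category(list_url):  # the ancient-town list page
    for article_href in parse_article_links(list_url):  # each town's detail page
        for img_src in parse_image_srcs(article_href):  # every img src on the page
            save_image(img_src)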
-
requests needs to be installed first:
pip install requests
# Older Python versions may need SSL verification disabled:
import ssl
import urllib.request

ssl._create_default_https_context = ssl._create_unverified_context  # disable SSL certificate checks on old Python builds
# Fetch the page
response = urllib.request.urlopen(url)
html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
html_string = str(html)  # decode() already returns str; kept so it can go straight into a database
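Since requests is installed anyway, the same fetch can also be done without the urllib SSL workaround. A minimal sketch (url is a placeholder for the page address):

import requests

response = requests.get(url, timeout=10)  # timeout so a dead connection cannot hang the crawl
response.encoding = 'utf-8'  # switch to gb2312/GBK if the page declares those
html_string = response.text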
-
1. Crawl a single category:
from bs4 import BeautifulSoup
import urllib.request
import requests
import os
import re
# Save the images inside an article
def down(url, num):
    # Fetch the page
    response = urllib.request.urlopen(url)
    html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
    html_string = str(html)
    soup = BeautifulSoup(html_string, "html.parser")  # parse the HTML
    # Match the region to scrape
    # The div's class is either "img" or "aImg", so check both
    pid = soup.find_all('div', {"class": "img"})
    if len(pid) == 0:
        pid = soup.find_all('div', {"class": "aImg"})
        pass
    print(pid)
    pid2 = soup.find("article").find("h1")  # this article's title
    # Strip the HTML tags
    pattern = re.compile(r'<[^>]+>', re.S)
    txt = pattern.sub('', str(pid2))
    print(txt)
    for img_html in pid:
        img_src = img_html.find('img')['src']
        root = "D:/python/do/spider/guojiadili/" + txt + "/"  # missing folder levels are created below
        img_name = re.sub(r'@!rw\d+', '', img_src.split("/")[-1])  # strip the "@!rwN" suffix after the extension to get the real image name
        print(img_name)
        path = root + img_name  # full path of the saved file
        # Save the image locally
        try:
            if not os.path.exists(root):
                os.makedirs(root)  # makedirs also creates missing parent folders, unlike mkdir
            if not os.path.exists(path):
                r = requests.get(img_src)
                r.raise_for_status()
                # "with" closes the file automatically, no manual close needed
                with open(path, "wb") as f:  # "wb" means write in binary mode
                    f.write(r.content)
                num += 1
                print("File saved, count = " + str(num))
            else:
                print("File already exists")
        except Exception as e:
            print("Failed to save file: " + str(e))
        pass
    pass
# down("http://www.dili360.com//article/p549a356731fc659.htm", 0)
# Parse every article URL on a category list page
def list_page(url, number):  # renamed from "list" so it no longer shadows the built-in
    # Fetch the page
    response = urllib.request.urlopen(url)
    html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
    html_string = str(html)
    soup = BeautifulSoup(html_string, "html.parser")  # parse the HTML
    # Match the region to scrape
    # pid = soup.find(attrs={"id": "content"})
    pid = soup.find_all('div', {"class": "thumb-img"})
    print(pid)
    print("Page " + str(number))
    for a_html in pid:
        a_href = a_html.find('a')['href']
        print(a_href)
        new_url = "http://www.dili360.com" + a_href  # article URL
        print(new_url)
        print(type(new_url))
        down(new_url, 0)  # fetch the images of this one article
        pass
    pass
# list_page('http://www.dili360.com/Travel/sight/20194/1.htm', 1)
# Walk every list page of the category
page = 1  # first page number
while page <= 14:  # last page number
    list_page('http://www.dili360.com/Travel/sight/20247/' + str(page) + '.htm', page)  # one list page URL
    page += 1
    pass
else:  # the else branch runs once the while loop finishes normally
    print("All articles saved!")
-
Tip: looping over sight/xxxxx.htm would crawl the entire category tree, but I don't recommend learning that way — gigabytes of crawled images are of no real use.
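In the same spirit as the disclaimer at the top, a short pause between list pages keeps the load on the server low. A minimal sketch of the section 1 loop with a delay added (the one-second interval is my assumption, tune it as you like):

import time

page = 1
while page <= 14:
    list_page('http://www.dili360.com/Travel/sight/20247/' + str(page) + '.htm', page)
    time.sleep(1)  # assumed pause so the crawl stays gentle on the server
    page += 1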
-
For your next trip, visit these ancient towns!
-
2. The following crawls the whole site's images in a single process (the images will be used for classification work in a few days):
from bs4 import BeautifulSoup
import urllib.request
import requests
import os
import re
# Save the images inside an article
def down(url, num, file):
    # Fetch the page
    response = urllib.request.urlopen(url)
    html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
    html_string = str(html)
    soup = BeautifulSoup(html_string, "html.parser")  # parse the HTML
    # Match the region to scrape
    # The div's class is either "img" or "aImg", so check both
    pid = soup.find_all('div', {"class": "img"})
    if len(pid) == 0:
        pid = soup.find_all('div', {"class": "aImg"})
        pass
    print(pid)
    pid2 = soup.find("article").find("h1")  # this article's title
    # Strip the HTML tags
    pattern = re.compile(r'<[^>]+>', re.S)
    txt = pattern.sub('', str(pid2))[0:20]  # keep at most 20 characters for the folder name
    print(txt)
    for img_html in pid:
        img_src = img_html.find('img')['src']
        root_file = "D:/python/do/spider/guojiadili/" + file + "/"  # the category's folder
        root = root_file + txt + "/"  # missing folder levels are created below
        img_name = re.sub(r'@!rw\d+', '', img_src.split("/")[-1])  # strip the "@!rwN" suffix after the extension to get the real image name
        print(img_name)
        path = root + img_name  # full path of the saved file
        # Save the image locally
        try:
            if not os.path.exists(root_file):
                os.makedirs(root_file)  # makedirs also creates missing parent folders, unlike mkdir
            if not os.path.exists(root):
                os.makedirs(root)
            if not os.path.exists(path):
                r = requests.get(img_src)
                r.raise_for_status()
                # "with" closes the file automatically, no manual close needed
                with open(path, "wb") as f:  # "wb" means write in binary mode
                    f.write(r.content)
                num += 1
                print("File saved, count = " + str(num))
            else:
                print("File already exists")
        except Exception as e:
            print("Failed to save file: " + str(e))
        pass
    pass
# down("http://www.dili360.com//article/p549a356731fc659.htm", 0)
# Parse every article URL on a category list page
def list_page(url, number):  # renamed from "list" so it no longer shadows the built-in
    print(url)
    # Fetch the page
    response = urllib.request.urlopen(url)
    html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
    html_string = str(html)
    soup = BeautifulSoup(html_string, "html.parser")  # parse the HTML
    try:
        # Match the region to scrape
        # pid = soup.find(attrs={"id": "content"})
        pid = soup.find_all('div', {"class": "thumb-img"})
        print(pid)
        pid2 = soup.find(attrs={"class": "article-left"}).find("h1")  # the category's title
        print(pid2)
        # Strip the HTML tags
        pattern = re.compile(r'<[^>]+>', re.S)
        file = pattern.sub('', str(pid2))[0:20]
        print("Page " + str(number))
        for a_html in pid:
            a_href = a_html.find('a')['href']
            print(a_href)
            new_url = "http://www.dili360.com" + a_href  # article URL
            print(new_url)
            print(type(new_url))
            down(new_url, 0, file)  # fetch the images of this one article
            pass
    except Exception:  # catch Exception rather than using a bare except
        print("No such page")
        pass
    pass
# list_page('http://www.dili360.com/Travel/sight/20194/1.htm', 1)
# list_page('http://www.dili360.com/travel/sight/20281.htm', 0)
# list_page('http://www.dili360.com/travel/sight/' + str(class_num) + '/' + str(1) + '.htm', 1)  # one list page URL
def page_class():
    for cla in range(20190, 20290):
        for page in range(1, 30):
            list_page('http://www.dili360.com/travel/sight/' + str(cla) + '/' + str(page) + '.htm', page)  # one list page URL
            pass
        pass
    pass
page_class()
pass
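One caveat with building folder names from scraped titles: Windows forbids characters such as \ / : * ? " < > | in paths, so a title containing any of them makes makedirs fail. A hedged sketch of a sanitizer that could be applied to txt and file first (the underscore replacement is an arbitrary choice):

import re

def safe_name(name):
    # Swap out every character Windows disallows in file and folder names
    return re.sub(r'[\\/:*?"<>|]', '_', name)[0:20]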
-
3. The following crawls with multiple threads, one thread per category:
from bs4 import BeautifulSoup
import urllib.request
import requests
import os
import re
import time
import threading  # the high-level threading module replaces the low-level _thread
# Save the images inside an article
def down(url, num, file):
    # Fetch the page
    response = urllib.request.urlopen(url)
    html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
    html_string = str(html)
    soup = BeautifulSoup(html_string, "html.parser")  # parse the HTML
    # Match the region to scrape
    # The div's class is either "img" or "aImg", so check both
    pid = soup.find_all('div', {"class": "img"})
    if len(pid) == 0:
        pid = soup.find_all('div', {"class": "aImg"})
        pass
    print(pid)
    pid2 = soup.find("article").find("h1")  # this article's title
    # Strip the HTML tags
    pattern = re.compile(r'<[^>]+>', re.S)
    txt = pattern.sub('', str(pid2))[0:20]  # keep at most 20 characters for the folder name
    print(txt)
    for img_html in pid:
        img_src = img_html.find('img')['src']
        root_file = "D:/python/do/spider/guojiadili/" + file + "/"  # the category's folder
        root = root_file + txt + "/"  # missing folder levels are created below
        img_name = re.sub(r'@!rw\d+', '', img_src.split("/")[-1])  # strip the "@!rwN" suffix after the extension to get the real image name
        print(img_name)
        path = root + img_name  # full path of the saved file
        # Save the image locally
        try:
            if not os.path.exists(root_file):
                os.makedirs(root_file)  # makedirs also creates missing parent folders, unlike mkdir
            if not os.path.exists(root):
                os.makedirs(root)
            if not os.path.exists(path):
                r = requests.get(img_src)
                r.raise_for_status()
                # "with" closes the file automatically, no manual close needed
                with open(path, "wb") as f:  # "wb" means write in binary mode
                    f.write(r.content)
                num += 1
                print("File saved, count = " + str(num))
            else:
                print("File already exists")
        except Exception as e:
            print("Failed to save file: " + str(e))
        pass
    pass
# down("http://www.dili360.com//article/p549a356731fc659.htm", 0)
# Parse every article URL on a category list page
def list_page(url, number):  # renamed from "list" so it no longer shadows the built-in
    print(url)
    # Fetch the page
    response = urllib.request.urlopen(url)
    html = response.read().decode('utf-8')  # encoding may be gb2312, utf-8 or GBK
    html_string = str(html)
    soup = BeautifulSoup(html_string, "html.parser")  # parse the HTML
    try:
        # Match the region to scrape
        # pid = soup.find(attrs={"id": "content"})
        pid = soup.find_all('div', {"class": "thumb-img"})
        print(pid)
        pid2 = soup.find(attrs={"class": "article-left"}).find("h1")  # the category's title
        print(pid2)
        # Strip the HTML tags
        pattern = re.compile(r'<[^>]+>', re.S)
        file = pattern.sub('', str(pid2))[0:20]
        print("Page " + str(number))
        for a_html in pid:
            a_href = a_html.find('a')['href']
            print(a_href)
            new_url = "http://www.dili360.com" + a_href  # article URL
            print(new_url)
            print(type(new_url))
            down(new_url, 0, file)  # fetch the images of this one article
            pass
    except Exception:  # catch Exception rather than using a bare except
        print("No such page")
        pass
    pass
# Single-threaded crawl
# def page_class():
#     for cla in range(20190, 20290):
#         for page in range(1, 30):
#             list_page('http://www.dili360.com/travel/sight/' + str(cla) + '/' + str(page) + '.htm', page)  # one list page URL
#
# page_class()
# Multi-threaded crawl: one thread per category
all_thread_num = 0

def page_class(cla, that_num):
    # print("Thread started = " + str(that_num))
    global all_thread_num
    all_thread_num += 1  # a threading.Lock would make this counter strictly thread-safe
    print("Total threads = " + str(all_thread_num))
    for page in range(1, 30):
        list_page('http://www.dili360.com/travel/sight/' + str(cla) + '/' + str(page) + '.htm', page)  # one list page URL
        pass
    pass

threads = []
for cla in range(20190, 20291):  # create one thread per category
    try:
        t = threading.Thread(target=page_class, args=(cla, cla - 20000))
        t.start()
        threads.append(t)
        pass
    except RuntimeError:
        print("Unable to start thread")
        pass
    pass
for t in threads:
    t.join()  # wait for every thread to finish instead of busy-waiting with "while 1: pass"
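One thread per category means about a hundred threads running at once, which is harder on both your machine and the server than it needs to be. As an alternative sketch (not the code above), the standard library's concurrent.futures caps the worker count; the limit of 5 is an assumption:

from concurrent.futures import ThreadPoolExecutor

# Run the same page_class jobs with at most 5 threads alive at any time
with ThreadPoolExecutor(max_workers=5) as pool:
    for cla in range(20190, 20291):
        pool.submit(page_class, cla, cla - 20000)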
Only 17,593 images???? And just 3,342 categorized travel articles??? I'm starting to question everything!
-