1) General-purpose crawlers:
Examples: search engines such as Baidu, 360, Google, and Sogou.
Workflow: visit pages -> crawl data -> store data -> process data -> provide retrieval (search) services.
robots protocol: a de facto convention; a site publishes a robots.txt file stating which parts of the site should not be crawled. It is advisory only and is not technically enforced, so a crawler you write yourself is not forced to obey it (see the robotparser sketch after this list).
Site ranking (SEO):
Drawbacks: most of what a general-purpose crawler fetches is irrelevant to any specific need, and the results cannot be tailored to a particular requirement (which is exactly what focused crawlers address).
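A minimal sketch of checking robots.txt before crawling, using the standard library's urllib.robotparser (the site URL and user-agent string here are placeholders, not from the original notes):

import urllib.robotparser

# Download and parse the site's robots.txt (placeholder site)
rp = urllib.robotparser.RobotFileParser()
rp.set_url('https://www.example.com/robots.txt')
rp.read()

# Ask whether a given user-agent may fetch a given path
print(rp.can_fetch('MySpider/1.0', 'https://www.example.com/some/page'))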
2) Focused crawlers
Purpose: implement a crawler for a specific need and grab only the data that is actually required.
Design approach:
1. Determine the URL(s) to crawl.
2. Simulate a browser and request the URL over HTTP to obtain the HTML returned by the server.
3. Parse the HTML string and extract the required data according to some rule (see the sketch below).
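A minimal sketch of these three steps using urllib plus lxml (the URL and XPath are placeholders, and lxml is assumed to be installed):

import urllib.request
from lxml import etree

# 1. Determine the URL to crawl (placeholder)
url = 'https://www.example.com/'

# 2. Simulate a browser request over HTTP and fetch the HTML
request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
html = urllib.request.urlopen(request).read().decode('utf-8')

# 3. Parse the HTML and extract the data we need (here: the page title)
tree = etree.HTML(html)
print(tree.xpath('//title/text()'))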
Common anti-crawling measures and how to deal with them:
1) User-Agent
User-Agent (UA for short) is a special request header that lets the server identify the client's operating system and version, CPU type, browser and version, rendering engine, language, plugins, and so on; sites may reject requests whose UA does not look like a real browser.
2) Proxy IP
Proxy providers such as 西刺代理 and 快代理.
What are elite (high-anonymity), anonymous, and transparent proxies, and how do they differ? A transparent proxy passes your real IP on to the target site; an anonymous proxy hides your IP but still reveals that a proxy is in use; an elite (high-anonymity) proxy hides both your IP and the fact that a proxy is being used.
3) Captcha challenges
Can be handled via captcha-solving services ("打码"/"云打码" platforms).
4) Dynamically loaded pages
The server returns data that is filled in by JavaScript (e.g. via Ajax), so the initial HTML does not contain the real page data.
5) Data encryption
Requires analysing the site's JavaScript code.
urllib.request.urlopen()
Simulates a browser sending a request to the server. The return value, response, holds the data returned by the server; its type is http.client.HTTPResponse, which provides the following methods:
| Method | Purpose |
| --- | --- |
| read() | Reads the response body as bytes; read(5) returns only the first 5 bytes |
| readline() | Reads a single line |
| readlines() | Reads line by line until the end, returning a list of lines |
| getcode() | Returns the HTTP status code |
| geturl() | Returns the URL that was requested |
| getheaders() | Returns the response headers |
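A minimal sketch exercising urlopen() and the methods above (the URL is only an example):

import urllib.request

# Simulate a browser request to the server (simple form, no custom headers)
response = urllib.request.urlopen('http://www.baidu.com')

print(response.getcode())     # HTTP status code, e.g. 200
print(response.geturl())      # the URL that was actually fetched
print(response.getheaders())  # list of (header-name, value) tuples
print(response.read(5))       # first 5 bytes of the body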
urllib.request.urlretrieve()
Downloads a resource directly to a local file; it can be used to fetch web pages, images, or videos.
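A minimal sketch (the image URL and file names are placeholders):

import urllib.request

# Download a web page to a local file
urllib.request.urlretrieve('http://www.baidu.com', 'baidu.html')

# Download an image in the same way (placeholder URL)
urllib.request.urlretrieve('https://www.example.com/some_image.jpg', 'image.jpg')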
UA recap: User-Agent (UA) is a special request header that lets the server identify the client's operating system and version, CPU type, browser and version, browser engine, rendering engine, language, plugins, and so on. To send a custom UA we customise the request object.
Syntax: request = urllib.request.Request(url, data=None, headers={})
1) GET request with urllib.parse.quote()
import urllib.request
import urllib.parse
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
url = 'https://www.baidu.com/s?wd='
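# quote() percent-encodes the non-ASCII keyword so it can be appended to the query string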
keyword = urllib.parse.quote('周杰伦')
url = url + keyword
print(url)
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
2) GET request with urllib.parse.urlencode()
import urllib.request
import urllib.parse
url = 'https://www.baidu.com/s?'
data = {
'wd': '周杰伦',
'age': '42',
'location': '中国台湾'
}
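# urlencode() turns the dict into a percent-encoded query string such as 'wd=...&age=42&location=...'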
url = url + urllib.parse.urlencode(data)
# print(url)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
3) POST request
import urllib.request
import urllib.parse
import json
url = 'https://fanyi.baidu.com/sug'
data = {
'kw': 'spider'
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
# POST parameters must be urlencoded and then encoded to bytes
data = urllib.parse.urlencode(data).encode('utf-8')
request = urllib.request.Request(url=url, data=data, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
content = json.loads(content)
print(content['data'][0]['v'])
4) Differences between POST and GET: with GET, the parameters are urlencoded and appended to the URL; with POST, they must additionally be encoded to bytes and passed via the data argument, and they never appear in the URL. The Douban example below uses GET and the KFC example after it uses POST.
import urllib.parse
import urllib.request
def create_request(page_arg):
url = 'https://movie.douban.com/j/chart/top_list?type=13&interval_id=100%3A90&action=&'
data = {
'start': (page_arg - 1) * 20,
'limit': 20
}
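    # start/limit page the ranking API 20 items at a time; urlencode() below appends them to the query string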
url = url + urllib.parse.urlencode(data)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
return urllib.request.Request(url=url, headers=headers)
def get_content(request_arg):
response = urllib.request.urlopen(request_arg)
return response.read().decode('utf-8')
def download(page_arg, content_arg):
with open('douban_' + str(page_arg) + '.json', 'w', encoding='utf-8') as fp:
fp.write(content_arg)
if __name__ == '__main__':
    start_page = int(input('Start page: '))
    end_page = int(input('End page: '))
for page in range(start_page, end_page + 1):
        request = create_request(page)
content = get_content(request)
download(page, content)
import urllib.request
import urllib.parse
def create_request(arg_page):
url = 'https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname'
data = {
'cname': '广州',
'pid': '',
'pageIndex': str(arg_page),
'pageSize': '10'
}
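    # POST form fields: unlike GET parameters they are not appended to the URL but sent in the request body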
data = urllib.parse.urlencode(data).encode('utf-8')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
request = urllib.request.Request(url=url, data=data, headers=headers)
return request
def get_content(arg_request):
response = urllib.request.urlopen(arg_request)
content = response.read().decode('utf-8')
return content
def download(arg_page, arg_content):
with open('kfc_' + str(arg_page) + '.json', 'w', encoding='utf-8') as fp:
fp.write(arg_content)
if __name__ == '__main__':
for page in range(1, 4):
created_request = create_request(page)
        fetched_content = get_content(created_request)
        download(page, fetched_content)
Exception handling: the two main exception classes are urllib.error.HTTPError and urllib.error.URLError; HTTPError is a subclass of URLError.
import urllib.request
import urllib.error
url = 'https://blog.csdn.net/m0_52503067/article/details/1308872711'
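# This article URL does not exist, so the request below is expected to raise urllib.error.HTTPError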
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
try:
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
except urllib.error.HTTPError:
    print('The system is under maintenance…')
import urllib.request
import urllib.error
url = 'https://www.yixinchd.com'
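# An address that cannot be reached is used here so that urlopen() raises urllib.error.URLError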
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
try:
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
print(content)
except urllib.error.URLError:
    print('The system is being upgraded…')
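Because HTTPError is a subclass of URLError, the two can also be handled in a single try block; a minimal sketch (the URL is a placeholder):

import urllib.request
import urllib.error

try:
    response = urllib.request.urlopen('https://www.example.com/does-not-exist')
    print(response.read().decode('utf-8'))
except urllib.error.HTTPError as e:
    # catch HTTPError first, otherwise the URLError branch would swallow it
    print('HTTP error, status code:', e.code)
except urllib.error.URLError as e:
    print('URL error, reason:', e.reason)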
import urllib.request
# Typical use case: during data collection, bypass the login page by copying the Cookie header from a browser session that is already logged in
url = 'https://weibo.cn/6348128187/info'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36',
'Cookie': 'SUB=_2A25IkTqmDeRhGeBN71oQ8ibNwzuIHXVr7zJurDV6PUJbktANLVPfkW1NRH7w-XCjbRc1bX9unbhvkaNW9jZBFLqP; SUBP=00'
'33WrSXqPxfM725Ws9jqgMF55529P9D9W5UG-o5opBbliwJzv9ep.M35JpX5KzhUgL.Foq0Shnpeonp1hM2dJLoIEBLxKBLB.zLBK-Lx'
'K-LB.qL1heLxK-L1K5L12eLxK-LB-BLBKqt; SSOLoginState=1704282870; _T_WM=88327e172a5590f58515f50a61044399; '
'MLOGIN=1'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
with open('weibo.html', 'w', encoding='utf-8') as fp:
fp.write(content)
urllib.request.urlopen(url) cannot customise request headers. urllib.request.Request(url, data, headers) can customise request headers. Handlers allow even more advanced customisation: as the business logic grows more complex, a customised Request object is no longer enough, because dynamic cookies and proxies cannot be handled through Request customisation alone.
# Goal: use a handler to request Baidu and fetch the page source
import urllib.request
url = 'https://www.baidu.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)
# 1. Create a handler object
handler = urllib.request.HTTPHandler()
# 2. Build an opener from the handler
opener = urllib.request.build_opener(handler)
# 3. Call the opener's open() method to send the request
response = opener.open(request)
content = response.read().decode('utf-8')
with open('handler.html', 'w', encoding='utf-8') as fp:
fp.write(content)
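The notes above point out that dynamic cookies cannot be handled by Request customisation alone; a minimal sketch of a cookie-aware opener using the standard library's http.cookiejar and HTTPCookieProcessor (the URL is a placeholder; a real login flow would need the site's actual login request):

import urllib.request
import http.cookiejar

# A CookieJar remembers cookies set by the server across requests
cookie_jar = http.cookiejar.CookieJar()
handler = urllib.request.HTTPCookieProcessor(cookie_jar)
opener = urllib.request.build_opener(handler)

# Cookies returned by this response are stored in the jar and sent back
# automatically on later requests made through the same opener
response = opener.open('https://www.example.com/')
for cookie in cookie_jar:
    print(cookie.name, cookie.value)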
1) Common uses of a proxy: hide the crawler's real IP, and work around per-IP access limits or blocks by switching addresses.
2) Configuring a proxy in code
import urllib.request
import urllib.parse
url = 'https://www.baidu.com/s?wd=ip'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)
# response = urllib.request.urlopen(request)
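# Note: the dict below only maps the 'http' scheme; because the target URL is https,
# an 'https' entry pointing at the proxy would also be needed for the request to go through it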
proxies = {
'http': '121.41.8.23:16817'
}
handler = urllib.request.ProxyHandler(proxies=proxies)
opener = urllib.request.build_opener(handler)
response = opener.open(request)
content = response.read().decode('utf-8')
with open('dali.html', 'w', encoding='utf-8') as fp:
fp.write(content)
3) Proxy pool
import urllib.request
import random
url = 'https://www.baidu.com/s?wd=ip'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
# In a real pool these would be different proxy addresses; the same sample address is repeated here
proxies_pool = [
    {'http': '121.41.8.23:16817'},
    {'http': '121.41.8.23:16817'},
]
# pick a proxy at random for each request
proxies = random.choice(proxies_pool)
# print(proxies)
request = urllib.request.Request(url=url, headers=headers)
handler = urllib.request.ProxyHandler(proxies=proxies)
opener = urllib.request.build_opener(handler)
response = opener.open(request)
content = response.read().decode('utf-8')
with open('daili.html', 'w', encoding='utf-8') as fp:
fp.write(content)
XPath basic syntax:
// selects nodes anywhere in the document, / selects direct children, and tag[@attr="value"] filters by attribute (e.g. //input[@id="su"]); @attr extracts an attribute value and text() extracts a node's text.
Example: get the value of the "百度一下" button on the Baidu home page
import urllib.request
from lxml import etree
url = 'https://www.baidu.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/120.0.0.0 Safari/537.36'
}
request = urllib.request.Request(url=url, headers=headers)
response = urllib.request.urlopen(request)
content = response.read().decode('utf-8')
tree = etree.HTML(content)
result = tree.xpath('//span/input[@id="su"]/@value')[0]
print(result)
Example: download some images from 站长素材 (sc.chinaz.com)
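The notes stop here; a hedged sketch of how such an image crawler could look. The listing URL and the XPath expressions below are assumptions rather than values from the original notes, and would need to be adjusted to the site's real page structure:

import urllib.request
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/120.0.0.0 Safari/537.36'
}
# Placeholder listing page
url = 'https://sc.chinaz.com/tupian/'
request = urllib.request.Request(url=url, headers=headers)
content = urllib.request.urlopen(request).read().decode('utf-8')

tree = etree.HTML(content)
# Hypothetical XPaths: collect image URLs and alt texts from the listing page
src_list = tree.xpath('//div[@class="item"]/img/@data-original')
name_list = tree.xpath('//div[@class="item"]/img/@alt')

for name, src in zip(name_list, src_list):
    # protocol-relative URLs ('//...') need a scheme before downloading
    full_src = 'https:' + src if src.startswith('//') else src
    urllib.request.urlretrieve(full_src, name + '.jpg')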