# Fetch the Python official website
import urllib.request
response = urllib.request.urlopen('https://www.python.org')
print(response.read().decode('utf-8'))
...(the page HTML is too long, omitted here)
print(type(response))
<class 'http.client.HTTPResponse'>
response.status  # status code of the response
200
response.getheaders()  # the response headers
[('Server', 'nginx'),
('Content-Type', 'text/html; charset=utf-8'),
('X-Frame-Options', 'DENY'),
('Via', '1.1 vegur'),
('Via', '1.1 varnish'),
('Content-Length', '48402'),
('Accept-Ranges', 'bytes'),
('Date', 'Fri, 26 Jul 2019 02:17:41 GMT'),
('Via', '1.1 varnish'),
('Age', '1290'),
('Connection', 'close'),
('X-Served-By', 'cache-iad2132-IAD, cache-hnd18731-HND'),
('X-Cache', 'MISS, HIT'),
('X-Cache-Hits', '0, 1457'),
('X-Timer', 'S1564107461.004033,VS0,VE0'),
('Vary', 'Cookie'),
('Strict-Transport-Security', 'max-age=63072000; includeSubDomains')]
response.getheader('Server')  # value of Server in the response headers
'nginx'
The API of the urlopen function:
urllib.request.urlopen(url, data=None, [timeout,]*, cafile=None, capath=None, cadefault=False, context=None)
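The cafile, capath and context parameters deal with SSL; a minimal sketch (not from the original notes) that passes a default certificate-verifying context together with a timeout:
import ssl
import urllib.request

context = ssl.create_default_context()  # standard context that verifies certificates
response = urllib.request.urlopen('https://www.python.org', timeout=10, context=context)
print(response.status)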
import urllib.parse
import urllib.request
data = bytes(urllib.parse.urlencode({'word': 'hello'}), encoding='utf8')
response = urllib.request.urlopen('http://httpbin.org/post', data=data)  # the data parameter turns the request into a POST
print(data)
print(response.read().decode('utf-8'))
b'word=hello'
{
"args": {},
"data": "",
"files": {},
"form": {
"word": "hello"
},
"headers": {
"Accept-Encoding": "identity",
"Content-Length": "10",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "httpbin.org",
"User-Agent": "Python-urllib/3.7"
},
"json": null,
"origin": "119.123.40.149, 119.123.40.149",
"url": "https://httpbin.org/post"
}
import socket
import urllib.error
try:
response = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
if isinstance(e.reason, socket.timeout):
print('TIME OUT')
TIME OUT
The constructor of the Request class:
class urllib.request.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None)
# Build a request with several parameters
from urllib import request, parse
url = 'http://httpbin.org/post'
headers = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Host': 'httpbin.org'
}
dict = {'name': 'Germey'}
data = bytes(parse.urlencode(dict), encoding='utf8')
req = request.Request(url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
{
"args": {},
"data": "",
"files": {},
"form": {
"name": "Germey"
},
"headers": {
"Accept-Encoding": "identity",
"Content-Length": "11",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "httpbin.org",
"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"
},
"json": null,
"origin": "119.123.40.149, 119.123.40.149",
"url": "https://httpbin.org/post"
}
# Use the add_header() method to add headers
req = request.Request(url=url, data=data, method='POST')
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
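As a continuation (not in the original notes), a request built this way is sent exactly as before:
response = request.urlopen(req)
print(response.read().decode('utf-8'))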
What is a Handler
A Handler handles one specific aspect of a request, such as login authentication, cookies or proxies; handlers are passed to build_opener() to construct an Opener, whose open() method is used just like urlopen().
Authentication
from urllib.request import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm, build_opener
from urllib.error import URLError
username = 'username'
password = 'password'
url = 'http://localhost:5000'
p = HTTPPasswordMgrWithDefaultRealm()  # first instantiate an HTTPPasswordMgrWithDefaultRealm object
p.add_password(None, url, username, password)  # add the username and password with add_password()
auth_handler = HTTPBasicAuthHandler(p)  # instantiate HTTPBasicAuthHandler, passing it the HTTPPasswordMgrWithDefaultRealm object
opener = build_opener(auth_handler)
try:
result = opener.open(url)
html = result.read().decode('utf-8')
print(html)
except URLError as e:
print(e.reason)
from urllib.error import URLError
from urllib.request import ProxyHandler, build_opener
proxy_handler = ProxyHandler({
'http': 'http://127.0.0.1:9743',
'https': 'https://127.0.0.1:9743'
})  # ProxyHandler takes a dict whose keys are protocol types and whose values are proxy links
opener = build_opener(proxy_handler)
try:
response = opener.open('https://www.baidu.com')
print(response.read().decode('utf-8'))
except URLError as e:
print(e.reason)
# Get the site's Cookies
import http.cookiejar, urllib.request
cookie = http.cookiejar.CookieJar()  # declare a CookieJar object
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('https://www.baidu.com')
for item in cookie:
print(item.name+'='+item.value)
BIDUPSID=F930DC7B2787707F675C4D0962FBE525
PSTM=1564122900
BD_NOT_HTTPS=1
# Save the Cookies in the Mozilla browser cookie format
filename = 'cookies.txt'
cookie = http.cookiejar.MozillaCookieJar(filename)  # use LWPCookieJar instead to save the Cookies in LWP format
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('https://www.baidu.com')
cookie.save(ignore_discard=True, ignore_expires=True)
# Load the Cookies file and use it
cookie = http.cookiejar.MozillaCookieJar(filename)
cookie.load('cookies.txt', ignore_discard=True, ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
response = opener.open('https://www.baidu.com')
print(response.read().decode('utf-8'))
from urllib import request, error
try:
response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.URLError as e:
print(e.reason)
Not Found
from urllib import request, error
try:
response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.HTTPError as e:
print(e.reason, e.code, e.headers, sep='\n')
Not Found
404
Server: nginx/1.10.3 (Ubuntu)
Date: Fri, 26 Jul 2019 07:09:04 GMT
Content-Type: text/html; charset=UTF-8
Transfer-Encoding: chunked
Connection: close
Set-Cookie: PHPSESSID=8jneufd9uur912goa2ljidb1a1; path=/
Pragma: no-cache
Vary: Cookie
Expires: Wed, 11 Jan 1984 05:00:00 GMT
Cache-Control: no-cache, must-revalidate, max-age=0
Link: ; rel="https://api.w.org/"
from urllib import request, error
try:
response = request.urlopen('https://cuiqingcai.com/index.htm')
except error.HTTPError as e:
print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
print(e.reason)
else:
print('Request successfully')
Not Found
404
Server: nginx/1.10.3 (Ubuntu)
Date: Fri, 26 Jul 2019 07:11:59 GMT
Content-Type: text/html; charset=UTF-8
Transfer-Encoding: chunked
Connection: close
Set-Cookie: PHPSESSID=v48n158pas6k0gcs0bdoq4sdb0; path=/
Pragma: no-cache
Vary: Cookie
Expires: Wed, 11 Jan 1984 05:00:00 GMT
Cache-Control: no-cache, must-revalidate, max-age=0
Link: ; rel="https://api.w.org/"
import socket
from urllib import request, error
try:
response = request.urlopen('https://www.baidu.com', timeout=0.01)
except error.URLError as e:
print(type(e.reason))
if isinstance(e.reason, socket.timeout):
print('TIMEOUT')
else:
print('Request successfully')
<class 'socket.timeout'>
TIMEOUT
1. urlparse()
from urllib.parse import urlparse
result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result), result, sep='\n')
<class 'urllib.parse.ParseResult'>
ParseResult(scheme='http', netloc='www.baidu.com', path='/index.html', params='user', query='id=5', fragment='comment')
Here, everything before :// is the scheme (protocol); what comes before the first / is the netloc (domain); after that is the path; after ; come the params; after ? comes the query (query conditions, typically used with GET-style URLs); and after # comes the fragment (anchor, used to jump straight to a position inside the page). A standard link follows this format:
scheme://netloc/path;params?query#fragment
API usage of urlparse():
urllib.parse.urlparse(urlstring, scheme='', allow_fragments=True)
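Two short sketches (added here, not in the original notes) of the scheme and allow_fragments parameters: scheme only supplies a default protocol when the URL itself has none, and allow_fragments=False folds the fragment into the preceding component.
from urllib.parse import urlparse
print(urlparse('www.baidu.com/index.html;user?id=5#comment', scheme='https'))
# ParseResult(scheme='https', netloc='', path='www.baidu.com/index.html', params='user', query='id=5', fragment='comment')
print(urlparse('http://www.baidu.com/index.html#comment', allow_fragments=False))
# ParseResult(scheme='http', netloc='www.baidu.com', path='/index.html#comment', params='', query='', fragment='')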
2. urlunparse()
from urllib.parse import urlunparse
data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
# besides a list, a tuple or another iterable data structure also works
print(urlunparse(data))
http://www.baidu.com/index.html;user?a=6#comment
3. urlsplit()
from urllib.parse import urlsplit
result = urlsplit('http://www.baidu.com/index.html;user?id=5#comment')
print(result)
SplitResult(scheme='http', netloc='www.baidu.com', path='/index.html;user', query='id=5', fragment='comment')
4. urlunsplit()
from urllib.parse import urlunsplit
data = ['http', 'www.baidu.com', 'index.html', 'a=6', 'comment']
print(urlunsplit(data))
http://www.baidu.com/index.html?a=6#comment
5. urljoin()
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com', 'FAQ.html'))  # scheme and netloc are filled in from the base
print(urljoin('http://www.baidu.com', 'https://raymoneshaw.com/FAQ.html'))  # the new link is complete, so nothing is filled in
print(urljoin('http://www.baidu.com?wd=abc', 'https://raymoneshaw.com'))  # the query of the base plays no part
http://www.baidu.com/FAQ.html
https://raymoneshaw.com/FAQ.html
https://raymoneshaw.com
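Two more urljoin() cases (added as illustration, not from the original notes): only the scheme, netloc and path of the base link are used, and a relative link replaces the last path segment of the base.
from urllib.parse import urljoin
print(urljoin('http://www.baidu.com/about.html', 'FAQ.html'))
# http://www.baidu.com/FAQ.html
print(urljoin('http://www.baidu.com', '?category=2#comment'))
# http://www.baidu.com?category=2#comment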
6. urlencode()
from urllib.parse import urlencode
params = {
'name': 'germey',
'age': '22',
}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url)
http://www.baidu.com?name=germey&age=22
7. parse_qs()
from urllib.parse import parse_qs
query = 'name=germey&age=22'
print(parse_qs(query))
{'name': ['germey'], 'age': ['22']}
8. parse_qsl()
from urllib.parse import parse_qsl
query = 'name=germey&age=22'
print(parse_qsl(query))
[('name', 'germey'), ('age', '22')]
9. quote()
from urllib.parse import quote
keyword = '壁纸'
url = 'https://www.baidu.com/s?wd=' + quote(keyword)
print(url)
https://www.baidu.com/s?wd=%E5%A3%81%E7%BA%B8
10. unquote()
from urllib.parse import unquote
url = 'https://www.baidu.com/s?wd=%E5%A3%81%E7%BA%B8'
print(unquote(url))
https://www.baidu.com/s?wd=壁纸
The Robots protocol, also called the crawler protocol or robots exclusion standard, tells crawlers and search engines which pages may be scraped and which may not. It usually takes the form of a robots.txt file placed in the root directory of the website.
When a crawler visits a site, it first checks for robots.txt. If the file exists, the crawler scrapes within the scope the file defines; if it does not exist, the crawler visits every page it can reach.
A sample robots.txt:
User-agent: *
Disallow: /
Allow: /public/
Here, User-agent: * applies the rules to every crawler, Disallow: / forbids scraping any page, and Allow: /public/ then re-permits the /public/ directory.
The urllib.robotparser module provides the RobotFileParser class, which decides from a site's robots.txt whether a crawler may scrape a given page. It is declared as:
urllib.robotparser.RobotFileParser(url='')
The class only needs the link to robots.txt. The URL may also be omitted (it defaults to empty) and set later with the set_url() method, as sketched below.
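A minimal sketch (added here) of the set_url() alternative just mentioned:
from urllib.robotparser import RobotFileParser
rp = RobotFileParser()
rp.set_url('http://www.jianshu.com/robots.txt')  # equivalent to passing the URL to the constructor
rp.read()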
Commonly used methods of the class: set_url(), read(), parse(), can_fetch(), mtime() and modified().
1. Using the read() method:
from urllib.robotparser import RobotFileParser
rp = RobotFileParser('http://www.jianshu.com/robots.txt')
rp.read()
print(rp.can_fetch('*', 'http://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*', 'http://www.jianshu.com/search?q=python&page=1&type=collections'))
False
False
2. Using the parse() method:
from urllib.robotparser import RobotFileParser
from urllib.request import urlopen, Request
rp = RobotFileParser()
# The original book calls urlopen directly without request headers; that now returns 403 Forbidden, so disguise the request as a browser
headers = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
}
url = 'http://www.jianshu.com/robots.txt'
req = Request(url=url, headers=headers)
response = urlopen(req)
rp.parse(response.read().decode('utf-8').split('\n'))
print(rp.can_fetch('*', 'http://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*', 'http://www.jianshu.com/search?q=python&page=1&type=collections'))
True
False
_________________________
??? Why do the two approaches give different results: most likely because in approach 1 rp.read() fetches robots.txt without a browser User-Agent, Jianshu answers 403, and RobotFileParser then treats the whole site as disallowed, so every can_fetch() returns False; approach 2 feeds in the real robots.txt, which allows /p/ pages but disallows /search.
import requests
r = requests.get('https://www.baidu.com/')
print(type(r))
print(r.status_code)
print(type(r.text))
print(r.text)
print(r.cookies)
<class 'requests.models.Response'>
200
<class 'str'>
...(page HTML omitted here)
<RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
1. Basic example
import requests
r = requests.get('http://httpbin.org/get')
print(r.text)
{
"args": {},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.21.0"
},
"origin": "219.134.114.18, 219.134.114.18",
"url": "https://httpbin.org/get"
}
# pass a dict through the params argument
data = {
'name': 'Germey',
'age': '22'
}
r2 = requests.get('http://httpbin.org/get', params=data)
print(r2.text)
{
"args": {
"age": "22",
"name": "Germey"
},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.21.0"
},
"origin": "219.134.114.18, 219.134.114.18",
"url": "https://httpbin.org/get?name=Germey&age=22"
}
# call the json() method to turn the response into a dict
print(r.json())
print(type(r.json()))
{'args': {}, 'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate', 'Host': 'httpbin.org', 'User-Agent': 'python-requests/2.21.0'}, 'origin': '219.134.114.18, 219.134.114.18', 'url': 'https://httpbin.org/get'}
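Note (behavior of requests, not covered in these notes): if the body is not valid JSON, r.json() raises json.decoder.JSONDecodeError. A minimal guard, reusing r from above:
try:
    data = r.json()
except ValueError:  # json.decoder.JSONDecodeError is a subclass of ValueError
    data = None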
2. Scraping a web page
import requests
import re
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}  # do not write User-Agent as User_Agent...
r = requests.get('https://www.zhihu.com/explore', headers=headers)
pattern = re.compile('explore-feed.*?question_link.*?>(.*?)</a>', re.S)
titles = re.findall(pattern, r.text)
print(titles)
['\n如何评价杨超越粉丝自制反网络暴力科普视频《心理少女小越》?\n', '\n游戏设计中,如何避免出现“打不过所以没资源,没资源所以打不过”的现象?\n', '\n韩国人眼中的中国是怎么样的?\n', '\n各个传统相声段子都是哪一版比较好?\n', '\n历史名人都有些什么轶事、趣事?\n', '\n一年过去,怎样评价腾讯“大文娱”布局下,哇唧唧哇对火箭少女101的运营?\n', '\n怎么样客观看待张云雷现象?\n', '\n勇士篮网先签后换,4 年 1.17 亿美元拿下德安吉洛·拉塞尔,如何评价这笔操作?\n', '\n是罗云熙成就了润玉,还是润玉成就了罗云熙?\n', '\n罗云熙在白发中饰演的容齐与香蜜中润玉的妆容造型区别在哪里?\n']
3. Fetching binary data
import requests
r = requests.get('https://github.com/favicon.ico')
# print(r.text)  # converting the image straight to a string gives garbled text
# print(r.content)  # the result starts with b, i.e. bytes data
# Save the image
with open('favicon.ico', 'wb') as f:
f.write(r.content)
import requests
data = {'name': 'Germey', 'age': '22'}
r = requests.post('https://httpbin.org/post', data=data)
print(r.text)
{
"args": {},
"data": "",
"files": {},
"form": {
"age": "22",
"name": "Germey"
},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Content-Length": "18",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.21.0"
},
"json": null,
"origin": "119.123.34.219, 119.123.34.219",
"url": "https://httpbin.org/post"
}
import requests
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
r = requests.get('http://www.jianchu.com', headers=headers)
print(type(r.status_code), r.status_code)
print(type(r.headers), r.headers)
print(type(r.cookies), r.cookies)
print(type(r.url), r.url)
print(type(r.history), r.history)
<class 'int'> 200
<class 'requests.structures.CaseInsensitiveDict'> {'Connection': 'close', 'Date': 'Tue, 30 Jul 2019 02:49:49 GMT', 'Server': 'Microsoft-IIS/6.0', 'X-Powered-By': 'ASP.NET, ThinkPHP', 'Content-Type': 'text/html; charset=utf-8', 'Cache-control': 'private'}
<class 'requests.cookies.RequestsCookieJar'> <RequestsCookieJar[]>
<class 'str'> http://www.jianchu.com/
<class 'list'> []
import requests
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
r = requests.get('http://www.jianchu.com', headers=headers)
exit() if not r.status_code == requests.codes.ok else print('Request Successfully')
Request Successfully
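requests.codes maps many more status names; a couple of examples (library facts, not from the original notes):
import requests
print(requests.codes.ok)         # 200
print(requests.codes.not_found)  # 404
print(requests.codes.forbidden)  # 403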
import requests
files = {'file': open('favicon.ico', 'rb')}
r = requests.post('https://httpbin.org/post', files=files)
print(r.text)
{
"args": {},
"data": "",
"files": {
"file": "data:application/octet-stream;base64,AAABA...(Cookie 太长此处省略)},
"form": {},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Content-Length": "6665",
"Content-Type": "multipart/form-data; boundary=c91243e76affc0aba448829a1ac67793",
"Host": "httpbin.org",
"User-Agent": "python-requests/2.21.0"
},
"json": null,
"origin": "113.91.43.181, 113.91.43.181",
"url": "https://httpbin.org/post"
}
import requests
r = requests.get('https://www.baidu.com')
print(r.cookies)
for key, value in r.cookies.items():
print(key + '=' + value)
<RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
BDORZ=27315
import requests
headers = {
'Cookie':'l_n_c=1; q_c1=0b3845e618e84f97b07763a0067ef058|...', # Cookie too long, truncated here
'host': 'www.zhihu.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
r = requests.get('https://www.zhihu.com', headers=headers)
print(r.text)
首页 - 知乎 ...
(output too long, omitted here)
import requests
cookies = 'l_n_c=1; q_c1=0b3845e618e84f97b07763a0067ef058|...'  # Cookie too long, truncated here (note: no trailing comma, or this would become a tuple)
# First instantiate a RequestsCookieJar object
jar = requests.cookies.RequestsCookieJar()
headers = {
'host': 'www.zhihu.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
for cookie in cookies.split(';'):  # split the cookies string
    key, value = cookie.split('=', 1)  # split each entry into key and value
    jar.set(key, value)  # set each cookie's key and value
r = requests.get('https://www.zhihu.com', cookies=jar, headers=headers)
print(r.text)
首页 - 知乎 ...
(output too long, omitted here)
1. Without a Session:
import requests
requests.get('http://httpbin.org/cookies/set/number/123456789')  # set a cookie
r = requests.get('http://httpbin.org/cookies')  # request the cookies with a separate call
print(r.text)  # the result is empty, so the cookie was not picked up
{
"cookies": {}
}
2. With a Session:
s = requests.Session()
s.get('http://httpbin.org/cookies/set/number/123456789')
r = s.get('http://httpbin.org/cookies')
print(r.text)
{
"cookies": {
"number": "123456789"
}
}
import requests
response = requests.get('https://www.12306.cn')
print(response.status_code)
200
import requests
response = requests.get('https://www.12306.cn', verify=False)
print(response.status_code)
200
d:\python 3.7\lib\site-packages\urllib3\connectionpool.py:847: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
d:\python 3.7\lib\site-packages\urllib3\connectionpool.py:847: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
# Option 1: suppress the warning
import requests
from requests.packages import urllib3
urllib3.disable_warnings()
response = requests.get('https://www.12306.cn', verify=False)
print(response.status_code)
# Option 2: capture the warning into logging
import logging
import requests
logging.captureWarnings(True)
response = requests.get('https://www.12306.cn', verify=False)
print(response.status_code)
200
200
# just a demonstration snippet (the cert paths are placeholders)
import requests
response = requests.get('https://www.12306.cn', cert=('/path/server.crt', '/path/key'))
print(response.status_code)
import requests
proxies = {
    'http': 'http://121.33.220.158:808',   # a free proxy IP found online
    'https': 'http://119.28.118.116:1080'  # with only the http proxy, the request below would not hit a certificate error
}
requests.get('https://www.taobao.com', proxies=proxies, verify=False)  # the https proxy triggers a certificate error, so verification is disabled
import requests
proxies = {
    'http': 'http://user:password@121.33.220.158:808',
}
requests.get('https://www.taobao.com', proxies=proxies)
import requests
proxies = {
    'http': 'socks5://user:password@121.33.220.158:808',
    # 'https': 'socks5://user:password@119.28.118.116:1080'  # this line raised InvalidSchema: Missing dependencies for SOCKS support (needs requests[socks])
}
requests.get('https://www.taobao.com', proxies=proxies, verify=False)
import requests
r = requests.get('https://www.taobao.com', timeout=1)  # timeout defaults to None, meaning wait forever
print(r.status_code)  # to set connect and read timeouts separately, pass a tuple such as (5, 30), as sketched below
200
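A sketch of the tuple form mentioned above (added here; the values are arbitrary):
import requests
r = requests.get('https://www.taobao.com', timeout=(5, 30))  # 5 s to connect, 30 s to read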
import requests
from requests.auth import HTTPBasicAuth
r = requests.get('http://localhost:5000', auth=HTTPBasicAuth('username', 'password'))
print(r.status_code)
# returns 200 if the username and password are correct, 401 if authentication fails
import requests
r = requests.get('http://localhost:5000', auth=('username', 'password'))
print(r.status_code)
# returns 200 if the username and password are correct, 401 if authentication fails
import requests
from requests_oauthlib import OAuth1
url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
auth = OAuth1('YOUR_APP_KEY', 'YOUR APP_SECRET',
'USER_OAUTH_TOKEN', 'USER_OAUTH_TOKEN_SECRET')
requests.get(url, auth=auth)
from requests import Request, Session
url = 'http://httpbin.org/post'
data = {
'name': 'germey'
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
s = Session()  # create a Session instance
req = Request('POST', url, data=data, headers=headers)  # build a Request instance from url, data and headers
prepped = s.prepare_request(req)  # Session.prepare_request() turns the Request into a PreparedRequest object
r = s.send(prepped)  # send it with the Session object's send() method
print(r.text)
{
"args": {},
"data": "",
"files": {},
"form": {
"name": "germey"
},
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Content-Length": "11",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "httpbin.org",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"
},
"json": null,
"origin": "119.123.34.219, 119.123.34.219",
"url": "https://httpbin.org/post"
}
import re
content = 'Hello 123 4567 World_This is a Regex Demo'
print(len(content))
result = re.match('^Hello\s\d\d\d\s\d{4}\s\w{10}', content)
print(result)  # the result is a Match object
print(result.group())  # the Match object's group() method returns the matched text
print(result.span())  # the Match object's span() method prints the matched range
41
<re.Match object; span=(0, 25), match='Hello 123 4567 World_This'>
Hello 123 4567 World_This
(0, 25)
import re
content = 'Hello 1234567 World_This is a Regex Demo'
result = re.match('^Hello\s(\d+)\sWorld', content)  # extract 1234567
print(result)
print(result.group())  # print the complete matched text
print(result.group(1))  # print the first group wrapped in ()
print(result.span())
<re.Match object; span=(0, 19), match='Hello 1234567 World'>
Hello 1234567 World
1234567
(0, 19)
import re
content = 'Hello 123 4567 World_This is a Regex Demo'
result = re.match('.*', content)
print(result)
print(result.group())
print(result.span())
<re.Match object; span=(0, 41), match='Hello 123 4567 World_This is a Regex Demo'>
Hello 123 4567 World_This is a Regex Demo
(0, 41)
# Greedy matching
import re
content = 'Hello 1234567 World_This is a Regex Demo'
result = re.match('^He.*(\d+).*Demo$', content)
print(result)
print(result.group(1))
<re.Match object; span=(0, 40), match='Hello 1234567 World_This is a Regex Demo'>
7
# Non-greedy matching
import re
content = 'Hello 1234567 World_This is a Regex Demo'
result = re.match('^He.*?(\d+).*Demo$', content)
print(result)
print(result.group(1))
<re.Match object; span=(0, 40), match='Hello 1234567 World_This is a Regex Demo'>
1234567
# The target sits at the end of the string
import re
content = 'http://weibo.com/comment/kEraCn'
result1 = re.match('http.*?comment/(.*?)', content)
result2 = re.match('http.*?comment/(.*)', content)
print('result1', result1.group(1))  # .*? matches nothing here
print('result2', result2.group(1))  # .* captures the result
result1
result2 kEraCn
import re
content = '''Hello 1234567 World_This
is a Regex Demo
'''
result = re.match('^He.*?(\d+).*Demo$', content)
print(result.group(1))
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
in
6
7 result = re.match('^He.*?(\d+).*Demo$', content)
----> 8 print(result.group(1))
AttributeError: 'NoneType' object has no attribute 'group'
import re
content = '''Hello 1234567 World_This
is a Regex Demo
'''
result = re.match('^He.*?(\d+).*Demo$', content, re.S)
print(result.group(1))
1234567
import re
content = 'Extra string Hello 1234567 World_This is a Regex Demo Extra string'
result = re.search('He.*?(\d+).*?Demo', content)
print(result)
<re.Match object; span=(13, 53), match='Hello 1234567 World_This is a Regex Demo'>
html = '''<div id="songs-list">
<h2 class="title">经典老歌</h2>
<p class="introduction">
经典老歌列表
</p>
<ul id="list" class="list-group">
<li data-view="2">一路上有你</li>
<li data-view="7">
<a href="/2.mp3" singer="任贤齐">沧海一声笑</a>
</li>
<li data-view="4" class="active">
<a href="/3.mp3" singer="齐秦">往事随风</a>
</li>
<li data-view="6"><a href="/4.mp3" singer="beyond">光辉岁月</a></li>
<li data-view="5"><a href="/5.mp3" singer="陈慧琳">记事本</a></li>
<li data-view="5">
<a href="/6.mp3" singer="邓丽君">但愿人长久</a>
</li>
</ul>
</div>'''
result = re.search('<li.*?active.*?singer="(.*?)">(.*?)</a>', html, re.S)  # .*? must not appear at the start or end of the pattern
# search() returns the first match, so dropping "active" would hit the second li node instead
# without re.S the fourth li node would be matched, because the third one contains line breaks
if result:
print(result.group(1), result.group(2))
齐秦 往事随风
results = re.findall('<li.*?href="(.*?)".*?singer="(.*?)">(.*?)</a>', html, re.S)
print(results)
print(type(results))
for result in results:
print(result)
print(result[0], result[1], result[2])
[('/2.mp3', '任贤齐', '沧海一声笑'), ('/3.mp3', '齐秦', '往事随风'), ('/4.mp3', 'beyond', '光辉岁月'), ('/5.mp3', '陈慧琳', '记事本'), ('/6.mp3', '邓丽君', '但愿人长久')]
('/2.mp3', '任贤齐', '沧海一声笑')
/2.mp3 任贤齐 沧海一声笑
('/3.mp3', '齐秦', '往事随风')
/3.mp3 齐秦 往事随风
('/4.mp3', 'beyond', '光辉岁月')
/4.mp3 beyond 光辉岁月
('/5.mp3', '陈慧琳', '记事本')
/5.mp3 陈慧琳 记事本
('/6.mp3', '邓丽君', '但愿人长久')
/6.mp3 邓丽君 但愿人长久
import re
content = '1234afasdf678sdlkfjk'
content = re.sub('\d+', '', content)
print(content)
afasdfsdlkfjk
# Using findall() directly, making the <a> tags optional
results = re.findall('<li.*?>\s*?(<a.*?>)?(\w+)(</a>)?\s*?</li>', html, re.S)
for result in results:
print(result[1])
一路上有你
沧海一声笑
往事随风
光辉岁月
记事本
但愿人长久
html = re.sub('<a.*?>|</a>', '', html)
print(html)
results = re.findall('<li.*?>(.*?)</li>', html, re.S)
for result in results:
print(result.strip())
<div id="songs-list">
<h2 class="title">经典老歌</h2>
<p class="introduction">
经典老歌列表
</p>
<ul id="list" class="list-group">
<li data-view="2">一路上有你</li>
<li data-view="7">
沧海一声笑
</li>
<li data-view="4" class="active">
往事随风
</li>
<li data-view="6">光辉岁月</li>
<li data-view="5">记事本</li>
<li data-view="5">
但愿人长久
</li>
</ul>
</div>
一路上有你
沧海一声笑
往事随风
光辉岁月
记事本
但愿人长久
import re
content1 = '2016-12-15 12:00'
content2 = '2016-12-16 12:05'
content3 = '2016-12-19 11:00'
pattern = re.compile('\d{2}:\d{2}')
result1 = re.sub(pattern, '', content1)
result2 = re.sub(pattern, '', content2)
result3 = re.sub(pattern, '', content3)
print(result1, result2, result3)
2016-12-15 2016-12-16 2016-12-19
The goal of this section is to extract the name, release time, score and poster image of each movie in the Maoyan TOP100 list at https://maoyan.com/board/4 and save the results to a file.
Page analysis:
import requests
def get_one_page(url):
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
return None
def main():
url = 'https://maoyan.com/board/4'
html = get_one_page(url)
print(html)
main()
TOP100榜 - 猫眼电影 - 一网打尽好电影
...(page HTML too long, mostly omitted here)
2. Regex extraction:
Looking at the returned HTML, each movie sits inside a dd node.
First extract the ranking: it lives in an i node whose class is board-index; with non-greedy matching the regex is:
- <dd>.*?board-index.*?>(.*?)</i>
Then extract the poster image: it is the second image link inside the a node, i.e. the data-src attribute, so the regex becomes:
- <dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)"
Extract the movie name: it sits in a p node with class name; use name as a marker and take the text of the a node inside it:
- <dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>
Extract the cast: it sits in a p node with class star, handled the same way:
- <dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>
Extract the release time:
- <dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>
Extract the score:
- <dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>
def parse_one_page(html):
pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
        '.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',
        re.S)
items = re.findall(pattern, html)
for item in items:
yield {
'index': item[0],
'image': item[1],
'title': item[2].strip(),
'actor': item[3].strip()[3:], # if len(item[3]) > 3 else '',
'time': item[4].strip()[5:], # if len(item[4]) > 5 else '',
'score': item[5].strip() + item[6].strip()
}
url = 'https://maoyan.com/board/4'
html = get_one_page(url)
for i in parse_one_page(html):
print(i)
{'index': '1', 'image': 'https://p1.meituan.net/movie/20803f59291c47e1e116c11963ce019e68711.jpg@160w_220h_1e_1c', 'title': '霸王别姬', 'actor': '张国荣,张丰毅,巩俐', 'time': '1993-01-01', 'score': '9.5'}
{'index': '2', 'image': 'https://p0.meituan.net/movie/283292171619cdfd5b240c8fd093f1eb255670.jpg@160w_220h_1e_1c', 'title': '肖申克的救赎', 'actor': '蒂姆·罗宾斯,摩根·弗里曼,鲍勃·冈顿', 'time': '1994-09-10(加拿大)', 'score': '9.5'}
{'index': '3', 'image': 'https://p0.meituan.net/movie/289f98ceaa8a0ae737d3dc01cd05ab052213631.jpg@160w_220h_1e_1c', 'title': '罗马假日', 'actor': '格利高里·派克,奥黛丽·赫本,埃迪·艾伯特', 'time': '1953-09-02(美国)', 'score': '9.1'}
{'index': '4', 'image': 'https://p1.meituan.net/movie/6bea9af4524dfbd0b668eaa7e187c3df767253.jpg@160w_220h_1e_1c', 'title': '这个杀手不太冷', 'actor': '让·雷诺,加里·奥德曼,娜塔莉·波特曼', 'time': '1994-09-14(法国)', 'score': '9.5'}
{'index': '5', 'image': 'https://p1.meituan.net/movie/b607fba7513e7f15eab170aac1e1400d878112.jpg@160w_220h_1e_1c', 'title': '泰坦尼克号', 'actor': '莱昂纳多·迪卡普里奥,凯特·温丝莱特,比利·赞恩', 'time': '1998-04-03', 'score': '9.5'}
{'index': '6', 'image': 'https://p0.meituan.net/movie/da64660f82b98cdc1b8a3804e69609e041108.jpg@160w_220h_1e_1c', 'title': '唐伯虎点秋香', 'actor': '周星驰,巩俐,郑佩佩', 'time': '1993-07-01(中国香港)', 'score': '9.1'}
{'index': '7', 'image': 'https://p0.meituan.net/movie/46c29a8b8d8424bdda7715e6fd779c66235684.jpg@160w_220h_1e_1c', 'title': '魂断蓝桥', 'actor': '费雯·丽,罗伯特·泰勒,露塞尔·沃特森', 'time': '1940-05-17(美国)', 'score': '9.2'}
{'index': '8', 'image': 'https://p0.meituan.net/movie/223c3e186db3ab4ea3bb14508c709400427933.jpg@160w_220h_1e_1c', 'title': '乱世佳人', 'actor': '费雯·丽,克拉克·盖博,奥利维娅·德哈维兰', 'time': '1939-12-15(美国)', 'score': '9.1'}
{'index': '9', 'image': 'https://p1.meituan.net/movie/ba1ed511668402605ed369350ab779d6319397.jpg@160w_220h_1e_1c', 'title': '天空之城', 'actor': '寺田农,鹫尾真知子,龟山助清', 'time': '1992', 'score': '9.1'}
{'index': '10', 'image': 'https://p0.meituan.net/movie/b0d986a8bf89278afbb19f6abaef70f31206570.jpg@160w_220h_1e_1c', 'title': '辛德勒的名单', 'actor': '连姆·尼森,拉尔夫·费因斯,本·金斯利', 'time': '1993-12-15(美国)', 'score': '9.2'}
Writing to a file:
Serialize the dictionary with the json library's dumps() method and pass ensure_ascii=False, so that Chinese characters are written as-is instead of being escaped to Unicode sequences (a quick comparison follows the function below).
import json
def write_to_file(content):
with open('result.txt', 'a', encoding='utf-8') as f:
print(type(json.dumps(content)))
f.write(json.dumps(content, ensure_ascii=False) + '\n')
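The effect of ensure_ascii, shown on a throwaway value (illustration only, not part of the original script):
import json
print(json.dumps({'title': '霸王别姬'}))                      # {"title": "\u9738\u738b\u522b\u59ec"}
print(json.dumps({'title': '霸王别姬'}, ensure_ascii=False))  # {"title": "霸王别姬"}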
import json
import requests
import re
import time
def get_one_page(url):
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
return None
def parse_one_page(html):
pattern = re.compile(
        '<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
        '.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',
        re.S)
items = re.findall(pattern, html)
for item in items:
yield {
'index': item[0],
'image': item[1],
'title': item[2].strip(),
'actor': item[3].strip()[3:], # if len(item[3]) > 3 else '',
'time': item[4].strip()[5:], # if len(item[4]) > 5 else '',
'score': item[5].strip() + item[6].strip()
}
def write_to_file(content):
with open('result.txt', 'a', encoding='utf-8') as f:
print(type(json.dumps(content)))
f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
url = 'https://maoyan.com/board/4?offset=' + str(offset)
html = get_one_page(url)
for item in parse_one_page(html):
print(item)
write_to_file(item)
if __name__ == '__main__':
for i in range(10):
main(offset=i * 10)
time.sleep(1)
{'index': '1', 'image': 'https://p1.meituan.net/movie/20803f59291c47e1e116c11963ce019e68711.jpg@160w_220h_1e_1c', 'title': '霸王别姬', 'actor': '张国荣,张丰毅,巩俐', 'time': '1993-01-01', 'score': '9.5'}
{'index': '2', 'image': 'https://p0.meituan.net/movie/283292171619cdfd5b240c8fd093f1eb255670.jpg@160w_220h_1e_1c', 'title': '肖申克的救赎', 'actor': '蒂姆·罗宾斯,摩根·弗里曼,鲍勃·冈顿', 'time': '1994-09-10(加拿大)', 'score': '9.5'}
{'index': '3', 'image': 'https://p0.meituan.net/movie/289f98ceaa8a0ae737d3dc01cd05ab052213631.jpg@160w_220h_1e_1c', 'title': '罗马假日', 'actor': '格利高里·派克,奥黛丽·赫本,埃迪·艾伯特', 'time': '1953-09-02(美国)', 'score': '9.1'}
{'index': '4', 'image': 'https://p1.meituan.net/movie/6bea9af4524dfbd0b668eaa7e187c3df767253.jpg@160w_220h_1e_1c', 'title': '这个杀手不太冷', 'actor': '让·雷诺,加里·奥德曼,娜塔莉·波特曼', 'time': '1994-09-14(法国)', 'score': '9.5'}
{'index': '5', 'image': 'https://p1.meituan.net/movie/b607fba7513e7f15eab170aac1e1400d878112.jpg@160w_220h_1e_1c', 'title': '泰坦尼克号', 'actor': '莱昂纳多·迪卡普里奥,凯特·温丝莱特,比利·赞恩', 'time': '1998-04-03', 'score': '9.5'}
...(the rest omitted)