""""
聚焦爬虫:爬取页面中指定的页面内容
-编码流程
1.指定url
2.发起请求
3.获取响应数据
4。将响应数据进行持久化存储
数据解析分类:
-正则
-bs4
-xpath(***)
数据解析原理:
-解析的局部的文本内容都会在标签之间或者标签对应的属性中进行存储
-1.进行指定标签定位
-2.标签或者标签对应的属性中存储的数据值进行提取
"""""
import requests
import re
import os
if __name__ == '__main__':
    # Create the output directory for the batch download further below.
    if not os.path.exists('./qiutuLibs'):
        os.mkdir('./qiutuLibs')
    # Single-image warm-up: fetch the binary content and persist it to disk.
    url = 'https://pic.qiushibaike.com/system/pictures/12376/123765122/medium/2FGDR342YXMO1LLU.jpg'
    img_data = requests.get(url=url).content
    with open('./jiutu.jpg', 'wb') as fp:
        fp.write(img_data)
    print('over')
    # Batch download: crawl every image on the image-ranking page.
    url = 'https://www.qiushibaike.com/imgrank/'
    headers_google = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
    }
    page_text = requests.get(url=url, headers=headers_google).text
    # Each image sits in a block shaped like:
    # <div class="thumb"> ... <img src="..." alt="..."> ... </div>
    ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
    # re.S lets '.' match newlines, so the pattern can span several lines of HTML.
    img_src_list = re.findall(ex, page_text, re.S)
    for src in img_src_list:
        # The scraped src is protocol-relative, so prepend the scheme.
        src = 'https:' + src
        img_data_result = requests.get(url=src, headers=headers_google).content
        # Use the last path segment as the file name.
        img_name = src.split('/')[-1]
        img_path = './qiutuLibs/' + img_name
        with open(img_path, 'wb') as fp:
            fp.write(img_data_result)
        print(img_name, 'over')
if __name__ == '__main__':
    import requests
    import re
    import json
"""
项目案例4:爬取国家药品监督管理总局中基于中华人民共和国化妆品生产许可证的相关数据
"""
    headers_firefox = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0'
    }
    headers_google = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
    }
"""
# with open('./1.html','w',encoding='utf-8') as fp:
# fp.write(data)
# print('over')
从此可以发现有些数据是动态加载过来的(例如阿贾克斯数据)
发现首页中对应的企业信息数据是通过ajax动态请求到的
通过对详情页的观察发现:url域名是一样的,只有携带的参数(id)不是一样的
id可以从首页中对应的ajax请求到的json串中获取
域名和ID拼接处一个完整企业对应的url
详情页的数据也是动态加载过来的
发现所有post请求的URL都是一样的,只有参数id是不同的
可以批量获取多家企业的id,然后合并id和url获得所需要的数据
"""
    url = 'http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsList'
    id_list = []
    # Walk the first 20 pages of the list endpoint and collect the company ids.
    for page in range(0, 20):
        page = str(page)
        param = {
            'on': 'true',
            'page': page,
            'pageSize': '15',
            'productName': ' ',
            'conditionType': '1',
            'applyname': ' '
        }
        json_ids = requests.post(url=url, data=param, headers=headers_google).json()
        for dic in json_ids['list']:
            id_list.append(dic['ID'])
    post_url = "http://scxk.nmpa.gov.cn:81/xk/itownet/portalAction.do?method=getXkzsById"
    all_datalist = []
    # Fetch the detail record for every collected id via the shared POST endpoint.
    for id in id_list:
        data = {
            'id': id
        }
        detail_json = requests.post(url=post_url, headers=headers_google, data=data).json()
        all_datalist.append(detail_json)
    # Persist all detail records into a single JSON file; `with` guarantees the
    # file handle is closed after the dump.
    with open('./alldata.json', 'w', encoding='utf-8') as fp:
        json.dump(all_datalist, fp=fp, ensure_ascii=False)
    print(len(all_datalist))
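    # Optional sanity check (a sketch, not part of the original flow): reload
    # the dump and confirm the record count round-trips.
    with open('./alldata.json', 'r', encoding='utf-8') as check_fp:
        assert len(json.load(check_fp)) == len(all_datalist)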