爬虫——爬取QQ空间

from urllib import request

# Target Qzone profile page to fetch (a specific QQ user's space).
url = 'https://user.qzone.qq.com/2908664710'

# Mobile User-Agent plus a logged-in session cookie: Qzone only serves
# profile content to an authenticated session.
# NOTE(review): the hard-coded cookie is session-bound and will expire —
# replace it with a fresh one before running.
headers = {
"User-Agent": "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/4.0.2 Mobile/8C148 Safari/6533.18.5",
"cookie":"pgv_pvi=5603690496; RK=hF4B+uYZ0p; pgv_pvid=9781457135; same_pc=%7B%7D; ptui_loginuin=2908664710; __Q_w_s__QZN_TodoMsgCnt=1; ptisp=ctc; ptcz=845f1796d3e319ef4561712cdb830cdec288d531defde9f4021e2f00ba36aba7; uin=o2908664710; skey=@oPlDSTPdO; p_uin=o2908664710; pt4_token=1ZwWJBOMRRskk8TZmV2Z6rYf*Hl6I0bPudWCnGnR958_; p_skey=qrKW4zleXOESm32PE34JDHfdnhH6Hv9DLPEbdOnhsJQ_; qzspeedup=sdch; pgv_info=ssid=s3412175011; qz_screen=1920x1080; 2908664710_todaycount=1; 2908664710_totalcount=517; QZ_FE_WEBP_SUPPORT=1; cpu_performance_v8=3; Loading=Yes"}

req = request.Request(url=url, headers=headers)

# Use a context manager so the HTTP response is always closed
# (the original leaked the connection by never calling close()).
with request.urlopen(req) as response:
    # Decode the response body as UTF-8 text.
    res = response.read().decode('utf-8')

# Save the fetched page to disk.
# BUG FIX: the write call must be indented inside the `with` block —
# the original file had it at column 0, which is an IndentationError.
with open('day4/static/qq2.html', "w", encoding='utf-8') as f:
    f.write(res)

转载于:https://www.cnblogs.com/langye521erxia/p/11111160.html

你可能感兴趣的:(爬虫——爬取QQ空间)