这个网页具备检测爬虫和浏览器访问的功能。
因此为了成功爬取该网页,我们需要伪装requests的headers(把爬虫伪装成一个浏览器)
代码如下:
# Fetch an Amazon product page while masquerading as a browser:
# Amazon rejects the default python-requests User-Agent, so we override it.
import requests

url = "http://www.amazon.cn/gp/product/B01M8L5Z3Y"
try:
    # A minimal browser-like UA is enough to pass the bot check.
    kv = {'user-agent': 'Mozilla/5.0'}
    r = requests.get(url, headers=kv)
    r.raise_for_status()  # raise HTTPError on non-2xx responses
    # Use the encoding sniffed from the body so Chinese text is not garbled.
    r.encoding = r.apparent_encoding
    print(r.text[1000:2000])
except requests.RequestException:
    # Catch only network/HTTP errors — a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit and hide programming mistakes.
    print("爬取失败")
输出:
1 log");ue.stub(ue,"onunload");ue.stub(ue,"onflush"); 2 3 (function(d,e){function h(f,b){if(!(a.ec>a.mxe)&&f){a.ter.push(f);b=b||{};var c=f.logLevel||b.logLevel;c&&c!==k&&c!==m&&c!==n&&c!==p||a.ec++;c&&c!=k||a.ecf++;b.pageURL=""+(e.location?e.location.href:"");b.logLevel=c;b.attribution=f.attribution||b.attribution;a.erl.push({ex:f,info:b})}}function l(a,b,c,e,g){d.ueLogError({m:a,f:b,l:c,c:""+e,err:g,fromOnError:1,args:arguments},g?{attribution:g.attribution,logLevel:g.logLevel}:void 0);return!1}var k="FATAL",m="ERROR",n="WARN",p="DOWNGRADED",a={ec:0,ecf:0, 4 pec:0,ts:0,erl:[],ter:[],mxe:50,startTimer:function(){a.ts++;setInterval(function(){d.ue&&a.pec<a.ec&&d.uex("at");a.pec=a.ec},1E4)}};l.skipTrace=1;h.skipTrace=1;h.isStub=1;d.ueLogError=h;d.ue_err=a;e.οnerrοr=l})(ue_csm,window); 5 6 ue.stub(ue,"event");ue.stub(ue,"onSushiUnload");ue.stub(ue,"onSushiFlush"); 7 8 var ue_url='/gp/product/B01M8L5Z3Y/uedata/unsticky/461-6495535-0834918/NoPageType/ntpoffrw', 9 ue_sid='461-6495535-0834918', 10 ue_mid='AAH