To prevent scraping, websites deploy anti-crawling mechanisms: a large number of similar requests coming from the same IP address gets that IP blocked, and access is only restored after some time.
There are a few common workarounds (the first and third are sketched right after this list):
Modify the request headers to impersonate a browser (instead of letting the code hit the site directly)
Use proxy IPs and rotate through them
Add a delay between requests
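As a quick illustration of the first and third tricks (proxy rotation is covered in detail below), here is a minimal sketch; the target URLs are hypothetical placeholders:

import random
import time
import requests

# Hypothetical placeholder URLs; substitute the pages you actually want to crawl.
urls = ['http://example.com/page1', 'http://example.com/page2']

# A browser-like User-Agent, so the request does not look like bare code.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/72.0.3626.119 Safari/537.36'}

for url in urls:
    r = requests.get(url, headers=headers, timeout=10)
    print(url, r.status_code)
    # Pause for a random interval between requests.
    time.sleep(random.uniform(1, 3))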
Free proxies can be scraped from this site: https://www.xicidaili.com/
Inspect the page and hover to locate the elements:
The proxy IP addresses we want sit in rows with class="odd". The code below collects them into the proxy_ip_list list.
# Example code
from bs4 import BeautifulSoup
import requests
import time

def open_proxy_url(url):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    try:
        r = requests.get(url, headers=headers, timeout=20)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        print('Failed to open page ' + url)

def get_proxy_ip(response):
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.select('.odd')  # select the rows with class "odd"
    for proxy_ip in proxy_ips:
        ip = proxy_ip.select('td')[1].text
        port = proxy_ip.select('td')[2].text
        protocol = proxy_ip.select('td')[5].text
        if protocol in ('HTTP', 'HTTPS'):
            proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list

if __name__ == '__main__':
    proxy_url = 'https://www.xicidaili.com/'
    text = open_proxy_url(proxy_url)
    proxy_ip_filename = 'proxy_ip.txt'
    with open(proxy_ip_filename, 'w') as f:
        f.write(text)
    text = open(proxy_ip_filename, 'r').read()
    proxy_ip_list = get_proxy_ip(text)
    print(proxy_ip_list)
Alternatively, use bs4's find_all('tr') on the ip_list table to get all the IPs:
def get_proxy_ip(response):
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.find(id='ip_list').find_all('tr')
    for proxy_ip in proxy_ips:
        if len(proxy_ip.select('td')) >= 8:
            ip = proxy_ip.select('td')[1].text
            port = proxy_ip.select('td')[2].text
            protocol = proxy_ip.select('td')[5].text
            if protocol in ('HTTP', 'HTTPS', 'http', 'https'):
                proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list
The proxies argument is a dictionary:
{'http': 'http://IP:port', 'https': 'https://IP:port'}
Pass it straight into requests' get method:
web_data = requests.get(url, headers=headers, proxies=proxies)
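A minimal, self-contained sketch of this call; the proxy address below is a placeholder, so substitute one of the entries harvested into proxy_ip_list:

import requests

# Placeholder proxy address; replace it with a working entry from proxy_ip_list.
proxies = {'http': 'http://10.10.1.10:3128',
           'https': 'https://10.10.1.10:1080'}
headers = {'User-Agent': 'Mozilla/5.0'}

web_data = requests.get('http://www.baidu.com', headers=headers,
                        proxies=proxies, timeout=10)
print(web_data.status_code)

Wrapped up as a helper below, the protocol prefix of the proxy string decides which key of the dictionary gets set: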
def open_url_using_proxy(url, proxy):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    proxies = {}
    if proxy.startswith('HTTPS'):
        proxies['https'] = proxy
    else:
        proxies['http'] = proxy
    try:
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return (r.text, r.status_code)
    except requests.RequestException:
        print('Failed to open page ' + url)
        return False

url = 'http://www.baidu.com'
text = open_url_using_proxy(url, proxy_ip_list[0])
Whether a proxy site is free or paid, the proxy IPs it provides are not guaranteed to work, so each one should be verified before it goes into our proxy pool. A few ways to verify:
Request a page and check that the returned status code is 200
Actually fetch a known page and check that its title matches the expected one
Request a "what is my IP"-style service and check which source IP it reports
First, verify the return code.
def check_proxy_avaliability(proxy):
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID_PROXY = False
    if result:
        text, status_code = result
        if status_code == 200:
            print('Valid proxy IP: ' + proxy)
        else:
            print('Invalid proxy IP: ' + proxy)
Then tighten the check by also extracting the page title and comparing it against the expected one:

import re

def check_proxy_avaliability(proxy):
    url = 'http://www.baidu.com'
    text, status_code = open_url_using_proxy(url, proxy)
    VALID = False
    if status_code == 200:
        r_title = re.findall('<title>.*</title>', text)
        if r_title:
            if r_title[0] == '<title>百度一下,你就知道</title>':
                VALID = True
    if VALID:
        print('Valid proxy IP: ' + proxy)
    else:
        print('Invalid proxy IP: ' + proxy)
from bs4 import BeautifulSoup
import requests
import re
import json

def open_proxy_url(url):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        print('Failed to open page ' + url)

def get_proxy_ip(response):
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.find(id='ip_list').find_all('tr')
    for proxy_ip in proxy_ips:
        if len(proxy_ip.select('td')) >= 8:
            ip = proxy_ip.select('td')[1].text
            port = proxy_ip.select('td')[2].text
            protocol = proxy_ip.select('td')[5].text
            if protocol in ('HTTP', 'HTTPS', 'http', 'https'):
                proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list

def open_url_using_proxy(url, proxy):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    proxies = {}
    if proxy.startswith(('HTTPS', 'https')):
        proxies['https'] = proxy
    else:
        proxies['http'] = proxy
    try:
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return (r.text, r.status_code)
    except requests.RequestException:
        print('Failed to open page ' + url)
        print('Invalid proxy IP: ' + proxy)
        return False

def check_proxy_avaliability(proxy):
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID_PROXY = False
    if result:
        text, status_code = result
        if status_code == 200:
            r_title = re.findall('<title>.*</title>', text)
            if r_title:
                if r_title[0] == '<title>百度一下,你就知道</title>':
                    VALID_PROXY = True
        if VALID_PROXY:
            check_ip_url = 'https://jsonip.com/'
            result = open_url_using_proxy(check_ip_url, proxy)
            if not result:
                return
            text, status_code = result
            print('Valid proxy IP: ' + proxy)
            with open('valid_proxy_ip.txt', 'a') as f:
                f.write(proxy + '\n')
            try:
                source_ip = json.loads(text).get('ip')
                print(f'Source IP address: {source_ip}')
                print('=' * 40)
            except (ValueError, AttributeError):
                print('Response is not JSON and cannot be parsed')
                print(text)
        else:
            print('Invalid proxy IP: ' + proxy)

if __name__ == '__main__':
    proxy_url = 'https://www.xicidaili.com/'
    proxy_ip_filename = 'proxy_ip.txt'
    text = open(proxy_ip_filename, 'r').read()
    proxy_ip_list = get_proxy_ip(text)
    for proxy in proxy_ip_list:
        check_proxy_avaliability(proxy)
Challenge project: simulate logging in to DXY (丁香园) and scrape the basic information of every user and the content of the replies on a forum thread.
DXY forum thread: http://www.dxy.cn/bbs/thread/626626#626626
import requests, json, re, random, time
from bs4 import BeautifulSoup
from selenium import webdriver
from lxml import etree

class getUrl(object):
    """docstring for getUrl"""
    def __init__(self):
        self.headers = {
            "Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                          "(KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate, sdch",
            "Accept-Language": "zh-CN,zh;q=0.8"
        }

    def run(self):
        browser = webdriver.Chrome('/home/ach/桌面/爬虫/学习任务/chromedriver')
        browser.get('https://auth.dxy.cn/accounts/login?service=http://www.dxy.cn/bbs/index.html')
        time.sleep(1)
        # Switch to the username/password login form
        js1 = 'document.querySelector("#j_loginTab1").style.display="none";'
        browser.execute_script(js1)
        time.sleep(1)
        js2 = 'document.querySelector("#j_loginTab2").style.display="block";'
        browser.execute_script(js2)
        # Enter the username and password
        input_name = browser.find_element_by_name('username')
        input_name.clear()
        input_name.send_keys('*')  # fill in your own username and password here
        input_pass = browser.find_element_by_name('password')
        input_pass.clear()
        input_pass.send_keys('*')
        browser.find_element_by_xpath('//*[@class="form__button"]/button').click()
        # A captcha may appear at this step; skipped for now
        time.sleep(10)
        cookie = browser.get_cookies()
        cookie_dict = {i['name']: i['value'] for i in cookie}
        # Navigate to the page to scrape
        browser.get("http://www.dxy.cn/bbs/thread/626626#626626")
        html = browser.page_source
        tree = etree.HTML(html)
        user = tree.xpath('//div[@id="postcontainer"]//div[@class="auth"]/a/text()')
        content = tree.xpath('//td[@class="postbody"]')
        for i in range(0, len(user)):
            result = user[i].strip() + ":" + content[i].xpath('string(.)').strip()
            # Append each record to a file
            dir_file = open("DXY_records.txt", 'a', encoding="utf-8")
            dir_file.write(result + "\n")
            dir_file.write('*' * 80 + "\n")
            dir_file.close()
        print('*' * 5 + "Scraping finished" + '*' * 5)

if __name__ == '__main__':
    geturl = getUrl()
    geturl.run()