In this article I've put together Python crawler code that fetches content by following "next page" links; readers who need this can use it as a reference.
Let's start with the example code:
from time import sleep

import faker
import requests
from lxml import etree

fake = faker.Faker()

base_url = "http://angelimg.spbeen.com"

def get_next_link(url):
    # Return the absolute URL of the "next page" link, or False on the last page.
    content = downloadHtml(url)
    html = etree.HTML(content)
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        return base_url + next_url[0]
    else:
        return False
def downloadHtml(url):
    # Fetch a page with a random User-Agent and a fixed Referer.
    user_agent = fake.user_agent()
    headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
    response = requests.get(url, headers=headers)
    return response.text
def getImgUrl(content):
    # Extract the image URL and the page title from the article page.
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    title = html.xpath(".//div[@class='article']/h2/text()")
    return img_url[0], title[0]
def saveImg(title, img_url):
    # Download the image and save it as <title>.jpg (the txt/ directory must already exist).
    if img_url is not None and title is not None:
        with open("txt/" + str(title) + ".jpg", 'wb') as f:
            user_agent = fake.user_agent()
            headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
            content = requests.get(img_url, headers=headers)
            # request_view(content)
            f.write(content.content)
def request_view(response):
    # Debug helper: dump the response to tmp.html and open it in a browser tab.
    # A <base> tag is injected so relative links resolve against the original site.
    import webbrowser
    request_url = response.url
    base_url = '<head><base href="%s">' % (request_url)
    base_url = base_url.encode()
    content = response.content.replace(b"<head>", base_url)
    with open('tmp.html', 'wb') as tem_html:
        tem_html.write(content)
    webbrowser.open_new_tab('tmp.html')
def crawl_img(url):
    # Download one page, pull out the image URL and title, and save the image.
    content = downloadHtml(url)
    res = getImgUrl(content)
    title = res[1]
    img_url = res[0]
    saveImg(title, img_url)

if __name__ == "__main__":
    url = "http://angelimg.spbeen.com/ang/4968/1"
    while url:
        print(url)
        crawl_img(url)
        url = get_next_link(url)
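The main loop terminates on its own: once a page has no more <a class="ch next"> anchor, get_next_link returns False and while url exits. Note that sleep is imported at the top but never called; a slightly politer variant of the loop that throttles requests could look like this (the one-second delay is my own addition, not part of the original):

if __name__ == "__main__":
    url = "http://angelimg.spbeen.com/ang/4968/1"
    while url:
        print(url)
        crawl_img(url)
        url = get_next_link(url)
        sleep(1)  # arbitrary pause between pages to avoid hammering the server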
How does a Python crawler loop to the next page automatically when scraping text? The demo below locates the "next page" link first with BeautifulSoup, then with lxml:
from bs4 import BeautifulSoup
import requests
import time
from lxml import etree
import os

# This demo shows how to scrape some text with BeautifulSoup.
def start():
    # Make the request
    html = requests.get('http://www.baidu.com')
    # Fix the encoding
    html.encoding = html.apparent_encoding
    # Build the soup object
    soup = BeautifulSoup(html.text, 'html.parser')
    print(type(soup))
    print('printing the document:')
    print(soup.prettify())
    # Grab the <title> string (plain attribute access, so the IDE offers no completion hint)
    title = soup.head.title.string
    print(title)
    # Write it to a text file
    with open(r'C:/Users/a/Desktop/a.txt', 'w') as f:
        f.write(title)
    print(time.localtime())

    url_2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/sz_4.shtml'

    def get_html_from_bs4(url):
        # response = requests.get(url, headers=data, proxies=ip).content.decode('utf-8')
        response = requests.get(url).content.decode('utf-8')
        soup = BeautifulSoup(response, 'html.parser')
        # The ninth link in the pager block is the "next page" link
        next_page = soup.select('#displaypagenum a:nth-of-type(9)')[0].get('href')
        print(next_page)
        next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page

    def get_html_from_etree(url):
        response = requests.get(url).content.decode('utf-8')
        html = etree.HTML(response)
        # The same pager link, located by XPath instead of a CSS selector
        next_page = html.xpath('.//a[@class="PageNum"][8]/@href')[0]
        print(next_page)
        # next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page

    get_html_from_etree(url_2)

if __name__ == '__main__':
    start()
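Both helpers above only locate a single "next page" link and print it. To actually loop, feed the resolved link back into the fetch step until no link is found. Here is a minimal sketch of that loop, assuming the same pager structure as the XPath version (the max_pages cap and the urljoin call are my additions, not from the original code):

from urllib.parse import urljoin

def crawl_all_pages(start_url, max_pages=50):
    # Follow the PageNum link page by page until it disappears or the cap is hit.
    url = start_url
    for _ in range(max_pages):  # safety cap so a selector mismatch can't loop forever
        print(url)
        response = requests.get(url).content.decode('utf-8')
        html = etree.HTML(response)
        next_page = html.xpath('.//a[@class="PageNum"][8]/@href')
        if not next_page:  # the last page has no "next" link
            break
        url = urljoin(url, next_page[0])  # resolve a relative href against the current page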
That's all for this article on fetching the next page in a Python crawler. For more on Python crawlers, feel free to leave a comment.