Python: Scraping Jokes from haoduanzi.com

import urllib.request
import urllib.parse
from lxml import etree  # etree.HTML builds the XPath-capable parse tree
import time
import json
import os

item_list = []  # collected jokes, dumped to a JSON file at the end

def handle_request(url, page):
    headers = {
        # note the correct header name: "User-Agent"
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
    }
    # splice the page number into the url template
    # (with a {} placeholder this could instead be url = url.format(page))
    url = url % page
    print(url)
    request = urllib.request.Request(url=url, headers=headers)
    return request
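
The comment above mentions the str.format alternative; a minimal sketch of both ways to build the page URL (the URL pattern is the one used in main below):

base = 'http://www.haoduanzi.com/category/?1-%s.html'
print(base % 3)           # %-style placeholder, as used in handle_request
base = 'http://www.haoduanzi.com/category/?1-{}.html'
print(base.format(3))     # {}-style placeholder with str.format

Both lines print the same URL for page 3.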

def parse_content(content):
    # build the XPath-capable parse tree
    tree = etree.HTML(content)
    # each joke sits in its own wrapper div that contains both a "head" div
    # (title) and a "content" div (body); select those wrappers and adjust
    # the selector if the site's markup changes
    div_list = tree.xpath('//div[div[@class="head"] and div[@class="content"]]')
    print(len(div_list))
    for odiv in div_list:
        # title text, queried relative to the current joke div
        title = odiv.xpath('.//div[@class="head"]/h2/text()')[0]
        print(title)
        # the body paragraphs come back as a list of strings
        text_lt = odiv.xpath('.//div[@class="content"]/p/text()')
        text = '\n'.join(text_lt)
        print(text)
        item = {
            'title': title,
            'content': text,
        }
        # collect the item so it can be written out later
        item_list.append(item)
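
One detail worth calling out: an XPath that starts with // searches the whole document even when called on a sub-element, while a leading .// keeps the query relative to that element. A small self-contained sketch (the HTML string is made up purely for illustration):

from lxml import etree

doc = etree.HTML('<div><div class="head"><h2>A</h2></div></div>'
                 '<div><div class="head"><h2>B</h2></div></div>')
first = doc.xpath('//div[div[@class="head"]]')[0]
print(first.xpath('//div[@class="head"]/h2/text()'))   # ['A', 'B'] - searches the whole document
print(first.xpath('.//div[@class="head"]/h2/text()'))  # ['A'] - relative to `first`

That is why the title and content queries inside the loop above use the .// form.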

def main():
    start_page = int(input("Enter the start page: "))
    end_page = int(input("Enter the end page: "))
    url = 'http://www.haoduanzi.com/category/?1-%s.html'
    # equivalent template with a {} placeholder:
    # url = 'http://www.haoduanzi.com/category/?1-{}.html'
    for page in range(start_page, end_page + 1):
        request = handle_request(url, page)
        content = urllib.request.urlopen(request).read().decode()
        # parse the page and collect its jokes into item_list
        parse_content(content)
        # pause between requests to avoid hammering the site
        time.sleep(2)

    # write everything out as JSON
    string = json.dumps(item_list, ensure_ascii=False)
    with open('/Users/marine/Desktop/python/duanzi.txt', 'w', encoding='utf8') as fp:
        fp.write(string)

if __name__ == '__main__':
    main()
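
After a run, the output file holds a single JSON array. A quick sanity check, assuming at least one joke was scraped (the path is the one hard-coded above):

import json

with open('/Users/marine/Desktop/python/duanzi.txt', encoding='utf8') as fp:
    items = json.load(fp)
print(len(items), 'jokes saved')
print(items[0]['title'])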
