Scraping Article Titles and Abstracts from Jianshu with urllib

Introduction

Fetch the static page with urllib's request module, then extract the basic data with re regular expressions.
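
The idea in miniature (a sketch only: the URL and regex here are placeholders, not the Jianshu-specific ones, which appear in the full script in the Code section below):

import re
from urllib import request

req = request.Request('https://example.com',
                      headers={'User-Agent': 'Mozilla/5.0'})  # send a browser-like UA so the server accepts the request
html = request.urlopen(req).read().decode()                   # download the static HTML
print(re.findall(r'<title>(.*?)</title>', html, re.S))        # pull the wanted field out with a regex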

Code


import re
from urllib import request

# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
# req = request.Request('http://www.jianshu.com', headers=headers)
# resp = request.urlopen(req)
# text = resp.read().decode()                                # fetch the Jianshu homepage HTML
# # print(text)
# reg = r'<a class="title"[^>]*>(.*?)</a>'                   # regex for the title
# regse = r'<p class="abstract">\s*(\S*)\s*</p>'             # regex for the abstract
# pattern = re.compile(reg)                                  # compile the regexes
# patternse = re.compile(regse)
# contain = pattern.findall(text)                            # find every matching string
# containse = patternse.findall(text)
# for (tie, ab) in zip(contain, containse):                  # print title/abstract pairs together
#     print("标题:", tie)
#     print("摘要:", ab)


class jianshuhome(object):
    def __init__(self):
        self.url = 'https://www.jianshu.com'

    def get_page(self):
        # Browser User-Agent header, the basic prerequisite for scraping;
        # without it the request is usually rejected
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
        req = request.Request(self.url, headers=headers)
        resp = request.urlopen(req)
        data = resp.read().decode()
        return data

    def parse_data(self, data):
        # Regex matching: capture each title and its abstract
        # (the class="title" / class="abstract" selectors follow Jianshu's homepage markup)
        article = re.findall(
            r'<a class="title"[^>]*>(.*?)</a>.*?<p class="abstract">\s*(.*?)\s*</p>',
            data, re.S)
        return article


if __name__ == '__main__':
    jianshu = jianshuhome()
    data = jianshu.get_page()
    for i in jianshu.parse_data(data):
        print(i[0])
        print(i[1])
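
To check the parsing regex without a network request, here is a small offline test against a hypothetical HTML fragment that mirrors the class="title" / class="abstract" structure assumed above; it shows how the non-greedy groups and the re.S flag cooperate:

import re

# hypothetical sample markup for testing only
sample = '''
<li>
  <a class="title" target="_blank" href="/p/abc123">示例标题</a>
  <p class="abstract">
    示例摘要文字
  </p>
</li>
'''

pairs = re.findall(
    r'<a class="title"[^>]*>(.*?)</a>.*?<p class="abstract">\s*(.*?)\s*</p>',
    sample, re.S)   # re.S lets .*? span the newlines between title and abstract
print(pairs)        # [('示例标题', '示例摘要文字')]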

 
