"""爬取知乎热榜笔记

学习自网易云课堂的爬虫直播
"""

import re
import requests
from lxml import etree
# Pulls (title, excerpt, image URL) triples out of the JSON blob that Zhihu
# embeds inline in the billboard page's HTML, matched as raw text.
content_re=re.compile('"titleArea":{"text":"(.*?)"},"excerptArea":{"text":"(.*?)"},"imageArea":{"url":"(.*?)"}')
# Desktop Chrome User-Agent — presumably so the request is served the normal
# page rather than a bot-detection response; verify if scraping starts failing.
headers={
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}

def get_hot(url):
    """Fetch the Zhihu billboard page and print every hot-list item.

    For each entry found in the page's embedded JSON, prints the title,
    the excerpt, and a usable image URL.

    Args:
        url: The billboard page URL (e.g. https://www.zhihu.com/billboard).

    Raises:
        requests.RequestException: on network failure or non-2xx response.
    """
    # Timeout so a stalled connection cannot hang the script forever;
    # raise_for_status surfaces HTTP errors instead of parsing an error page.
    html = requests.get(url, headers=headers, timeout=10)
    html.raise_for_status()

    # The regex matches the page's inline JSON directly; no DOM parsing needed
    # (the previous lxml/xpath result was computed but never used).
    contents = content_re.findall(html.text)
    for title, excerpt, image_url in contents:
        print('标题:', title)
        print('内容:', excerpt)
        # BUG FIX: the JSON escapes "/" as "\u002F". The old code replaced it
        # with a backslash, yielding invalid URLs like "https:\\pic4...".
        # Restore the forward slash so the URL is actually usable.
        print('图片地址:', image_url.replace('\\u002F', '/'))
if __name__ == '__main__':
    # Entry point: scrape and print the current Zhihu hot list.
    billboard_url = 'https://www.zhihu.com/billboard'
    get_hot(billboard_url)
	

# 你可能感兴趣的:(爬取知乎热榜笔记)