[Python Web Scraping] Beautiful Soup: Douban Movie Top 250

#coding:utf-8
from bs4 import BeautifulSoup
import requests


# Fetch the page
# Parse the HTML
# Extract the fields we want from each entry
# Paginate by following the <a> tag inside the "next page" span

# Page 1: https://movie.douban.com/top250?start=0
# Page 2: https://movie.douban.com/top250?start=25&filter=
# Page 3: https://movie.douban.com/top250?start=50&filter=
# (an offset-based variant that builds these URLs directly is sketched after the script)

def main(url_temp):
    # Douban rejects the default python-requests User-Agent, so send a browser-like one
    headers={'User-Agent':'Mozilla/5.0'}
    rep=requests.get(url_temp,headers=headers)
    soup=BeautifulSoup(rep.text,'lxml')
    ol=soup.find("ol",class_="grid_view")
    li_list=ol.find_all('li')
    for li in li_list:
        img=li.find('img')
        #print(img['alt'],img['src'])
        img_src=img['src']
        title=li.find("span",class_="title").text.strip()
        act_info=li.find("div",class_="bd").p.get_text().strip()
        star_info_all = li.find('div', class_='star').find_all('span')
        mv_score =star_info_all[1].text.strip()
        comments_num =star_info_all[3].text.strip()
        print(title)
        print(img_src)
        print(act_info)
        print(mv_score)
        print(comments_num)
        print('-' * 50)
    # the "next page" span only contains an <a> tag while there are more pages
    next_span=soup.find("span",class_="next")
    next_a=next_span.find('a') if next_span else None
    next_url=None
    if next_a:
        # the href is a relative query string like "?start=25&filter=", so join it with the base URL
        next_url='https://movie.douban.com/top250'+next_a['href']
    return next_url



if __name__=="__main__":
    n_url="https://movie.douban.com/top250?start=0"
    # keep fetching pages while main() returns a next-page URL (None means the last page was reached)
    while n_url:
        n_url = main(n_url)

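Since the Top 250 list always spans ten fixed pages (start = 0, 25, ..., 225, as the URL comments above show), following the "next" link is optional. Here is a minimal alternative sketch that builds the page URLs from the offsets directly; fetch_page is a hypothetical helper standing in for the parsing logic already shown in main(), not part of the original script.

# Alternative: iterate over the fixed offsets instead of following the "next" link.
from bs4 import BeautifulSoup
import requests

def fetch_page(url_temp):
    # same fetch/parse steps as main(), trimmed to printing the title for brevity
    headers={'User-Agent':'Mozilla/5.0'}
    rep=requests.get(url_temp,headers=headers)
    soup=BeautifulSoup(rep.text,'lxml')
    for li in soup.find("ol",class_="grid_view").find_all('li'):
        print(li.find("span",class_="title").text.strip())

for start in range(0,250,25):  # 0, 25, ..., 225 -- ten pages in total
    fetch_page('https://movie.douban.com/top250?start=%d' % start)

Both approaches fetch the same ten pages; the offset loop is simpler, while following the "next" link keeps working even if the number of pages changes.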