工作笔记|爬虫小案例 根据电影名称批量获取下载链接

Data

|-- movie_list.txt (gbk编码)
|-- download.py

Python Program

import requests
from bs4 import BeautifulSoup
from urllib.request import quote
import json

# quote()函数,可以帮我们把内容转为标准的url格式,作为网址的一部分打开

if __name__ == "__main__":
    # For each movie title in movie_list.txt (GBK-encoded, one per line),
    # search the ygdy8 movie site and append "title,download_link" to
    # output_file.txt. Titles with no result are reported on stdout.
    base_url = 'http://s.ygdy8.com/plus/s0.php?typeid=1&keyword='
    # Use `with` for BOTH files so the output file is flushed and closed
    # even on an unexpected exit (the original left output_file unclosed).
    with open("output_file.txt", "w", encoding="gbk") as output_file, \
         open("movie_list.txt", "r", encoding="gbk") as f:
        for line in f:
            movie_name = line.strip()
            if not movie_name:
                continue  # skip blank lines instead of querying an empty keyword
            # The site expects the search keyword percent-encoded as GBK bytes.
            gbkmovie = movie_name.encode('gbk')
            url = base_url + quote(gbkmovie)
            try:
                res = requests.get(url)
                res.encoding = 'gbk'  # pages are GBK; override requests' guess
                soup_movie = BeautifulSoup(res.text, 'html.parser')
                # The first <table> under .co_content8 holds the result list;
                # its first <a> links to the movie's detail page.
                urlpart = soup_movie.find(class_="co_content8").find_all('table')
                if urlpart:
                    urlpart = urlpart[0].find('a')['href']
                    urlmovie = 'https://www.ygdy8.com/' + urlpart
                    res1 = requests.get(urlmovie)
                    res1.encoding = 'gbk'
                    soup_movie1 = BeautifulSoup(res1.text, 'html.parser')
                    # Download link: first <a> of the <table> inside the
                    # <span> under div#Zoom on the detail page.
                    urldownload = soup_movie1.find('div', id="Zoom").find('span').find('table').find('a')['href']
                    print(urldownload)
                    output_file.write(f"{movie_name},{urldownload}\n")
                else:
                    # Some movies have no search result / no download link.
                    print('没有' + movie_name)
            # Narrow exceptions instead of a bare `except:` (which would also
            # swallow KeyboardInterrupt): AttributeError/TypeError/KeyError
            # cover a missing element in the parsed page; RequestException
            # covers network failures.
            except (AttributeError, TypeError, KeyError, requests.RequestException):
                print('没有找到电影')

你可能感兴趣的:(闲文杂记)