A Simple Scrape of a Movie Ranking List

  This mainly uses requests and XPath to parse the data, which is then stored in a MySQL database. For the code, I first wrote a quick version that simply got the job done, then had an AI help me refactor it into a class. I'm still fairly new to this, so if you run into anti-scraping measures, look for tutorials on JS reverse engineering.
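
The script below assumes a film_info table already exists in the challenge database. Here is a minimal sketch for creating it with pymysql; only the column names film_name and score come from the INSERT statement in the script, while the column types, the id primary key, and the utf8mb4 charset are my own assumptions:

import pymysql

# Minimal sketch: create the film_info table the scraper writes to.
# Column names match the INSERT below; types and charset are assumptions.
conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='your_password',  # replace with your own credentials
                       database='challenge', charset='utf8mb4')
try:
    with conn.cursor() as cur:
        cur.execute("""
            CREATE TABLE IF NOT EXISTS film_info (
                id INT AUTO_INCREMENT PRIMARY KEY,
                film_name VARCHAR(255) NOT NULL,
                score DECIMAL(3, 1) NOT NULL
            ) CHARACTER SET utf8mb4
        """)
    conn.commit()
finally:
    conn.close()

With the table in place, the full scraper looks like this: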

import requests
from fake_useragent import UserAgent
from lxml import etree
import pymysql
from pymysql.cursors import DictCursor
import time
import random

class DoubanMovieScraper:
    def __init__(self, db_config):
        """初始化数据库配置"""
        self.db_config = db_config
        self.conn = None
        self.cur = None

    def _init_db_connection(self):
        """初始化数据库连接"""
        try:
            self.conn = pymysql.connect(**self.db_config)
            self.cur = self.conn.cursor(DictCursor)
        except pymysql.Error as e:
            print(f"数据库连接失败: {e}")
            raise

    def _fetch_movie_data(self, url):
        """获取豆瓣电影数据"""
        # Random Chrome User-Agent so the request looks like a normal browser
        ua = UserAgent()
        headers = {'user-agent': ua.chrome}

        try:
            resp = requests.get(
                url=url,
                headers=headers,
                timeout=10
            )
            resp.raise_for_status()

            # Parse the page; grab each entry's first title span and its rating
            html = etree.HTML(resp.text)
            film_list = html.xpath("//li/div/div[2]/div[1]/a/span[1]/text()")
            score_list = html.xpath("//li/div/div[2]/div[2]/div/span[2]/text()")
            return list(zip(film_list, score_list))

        except requests.RequestException as e:
            print(f"请求失败: {e}")
            return []

    def _insert_into_database(self, data):
        """插入数据到数据库"""
        if not data:
            print("无有效数据可插入")
            return

        try:
            sql = "INSERT INTO film_info(film_name, score) VALUES (%s, %s)"
            self.cur.executemany(sql, data)
            self.conn.commit()
            print(f"成功插入 {len(data)} 条数据")
        except pymysql.Error as e:
            print(f"数据插入失败: {e}")
            self.conn.rollback()

    def _query_data(self):
        """查询数据库数据"""
        try:
            self.cur.execute("SELECT * FROM film_info")
            return self.cur.fetchall()
        except pymysql.Error as e:
            print(f"数据查询失败: {e}")
            return []

    def _close_connections(self):
        """关闭数据库连接"""
        if self.cur:
            self.cur.close()
        if self.conn:
            self.conn.close()

    def run(self):
        """Main flow: connect once, scrape every page, then print what was stored."""
        self._init_db_connection()
        try:
            for i in range(0, 250, 25):
                url = f'https://movie.douban.com/top250?start={i}&filter='
                try:
                    movie_data = self._fetch_movie_data(url)
                    # Short random pause between pages to be polite to the server
                    time.sleep(random.uniform(1, 2))
                    self._insert_into_database(movie_data)
                except Exception as e:
                    print(f'Error while processing {url}: {e}')

            results = self._query_data()
            print(f"\nFound {len(results)} records in total:")
            for item in results:
                print(f"Film: {item['film_name']}, Score: {item['score']}")
        finally:
            self._close_connections()


if __name__ == '__main__':
    # Database configuration: adjust to your own MySQL setup
    db_config = {
        'host': 'localhost',
        'port': 3306,
        'user': 'root',
        'password': 'hl1234567',
        'database': 'challenge',
        'charset': 'utf8mb4'  # make sure Chinese film titles are stored correctly
    }

    # Create an instance and run it
    scraper = DoubanMovieScraper(db_config)
    scraper.run()

[Screenshot 1: query results in Navicat]

[Screenshot 2: query results in Navicat]

The screenshots above show part of the results, viewed with the Navicat database GUI.
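
If you don't have Navicat installed, the same check can be done in a few lines of Python (a minimal sketch, reusing the connection settings from the script above):

import pymysql
from pymysql.cursors import DictCursor

# Minimal sketch: print a few stored rows without a GUI client.
conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='your_password',  # replace with your own credentials
                       database='challenge', charset='utf8mb4')
try:
    with conn.cursor(DictCursor) as cur:
        cur.execute("SELECT film_name, score FROM film_info LIMIT 5")
        for row in cur.fetchall():
            print(row['film_name'], row['score'])
finally:
    conn.close()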
