Scraping Web Data with a Simple Python Crawler

Example: crawling the Douban Movie Top 250 ratings and saving them to MySQL.

import re

import requests
import pymysql
from lxml import etree

url="https://movie.douban.com/top250"
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}
pre={'User-agent':'Mozilla/5.0'}
# 连接database
conn = pymysql.connect(host="localhost", user="root",password="123456",database="demo",charset="utf8")

# 得到一个可以执行SQL语句的光标对象
cursor = conn.cursor()
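
# The INSERT statements below assume a `movie` table already exists in the
# `demo` database. If it does not, a minimal matching schema can be created
# first; the column types here are an assumption, not taken from the original post.
cursor.execute("""
    CREATE TABLE IF NOT EXISTS movie (
        id INT AUTO_INCREMENT PRIMARY KEY,
        name VARCHAR(255),
        score VARCHAR(16),
        people VARCHAR(32)
    )
""")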


flag = True
while flag:
    html = requests.get(url, headers=header).text
    root = etree.HTML(html)  # renamed from `list`, which shadowed the built-in
    lis = root.xpath('//ol[@class="grid_view"]/li')
    try:
        for oneSelector in lis:
            name = oneSelector.xpath("div/div[2]/div[1]/a/span[1]/text()")[0]
            score = oneSelector.xpath("div/div[2]/div[2]/div/span[2]/text()")[0]
            # The raw text looks like "123456人评价"; keep only the number
            people = oneSelector.xpath("div/div[2]/div[2]/div/span[4]/text()")[0]
            people = re.findall("(.*?)人评价", people)[0]

            # Optional one-line quote for each film:
            # introduce = oneSelector.xpath("div/div[2]/div[2]/p[2]/span/text()")[0]

            # Build and run the INSERT statement
            sql = "INSERT INTO movie(name, score, people) VALUES ('{}', '{}', '{}')".format(name, score, people)
            print(sql)
            cursor.execute(sql)

        conn.commit()
    except Exception as e:
        print("Scrape failed: {}".format(e))
        conn.rollback()
        flag = False  # stop the loop; resources are closed after it

    # Get the relative address of the next page; when there is none, stop
    try:
        next_url = root.xpath('//span[@class="next"]/a/@href')[0]
        url = "https://movie.douban.com/top250" + next_url
    except IndexError:
        flag = False

# Close the cursor and the connection once all pages are done
cursor.close()
conn.close()
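
One caveat worth noting: building the INSERT with str.format breaks on any title that contains a quote character and leaves the script open to SQL injection. pymysql supports parameterized queries, so a safer sketch of the same insert (same table and column names as above) would be:

sql = "INSERT INTO movie(name, score, people) VALUES (%s, %s, %s)"
cursor.execute(sql, (name, score, people))

With %s placeholders, pymysql escapes the values itself, so quotes in movie titles no longer break the statement.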



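To sanity-check the results afterwards, the stored rows can be read back with a plain SELECT. A minimal sketch, assuming the same connection settings as the script above:

import pymysql

conn = pymysql.connect(host="localhost", user="root", password="123456",
                       database="demo", charset="utf8")
cursor = conn.cursor()
cursor.execute("SELECT name, score, people FROM movie LIMIT 10")
for row in cursor.fetchall():
    print(row)
cursor.close()
conn.close()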