Scraping movie data into Excel

Code (fetch the pages with urllib, parse each entry with BeautifulSoup plus regular expressions, write the workbook with xlwt):

# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
import urllib.request,urllib.error
import re
import xlwt

# Main entry point: crawl all pages, then write the results to an .xls file
def main():
    baseurl = 'https://movie.douban.com/top250?start='
    dataList = getData(baseurl)
    dbpath = "豆瓣电影250.xls"
    saveData(dbpath, dataList)

# Precompiled regex patterns for the fields of each movie entry
findLink = re.compile(r'<a href="(.*?)">')                                              # detail-page link
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)                                     # poster image; re.S lets . match newlines
findTitle = re.compile(r'<span class="title">(.*?)</span>')                             # movie title
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')  # rating
findJudge = re.compile(r'<span>(\d*)人评价</span>')                                      # number of ratings
findInq = re.compile(r'<span class="inq">(.*?)</span>')                                 # one-line summary
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)                                     # director/cast block


# Fetch the HTML of a given URL
def askUrl(url):
    # Send a browser User-Agent header, without which Douban rejects the request
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
        # print(html)
    except urllib.error.URLError as e:
        print("request failed")
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


# Crawl and parse all 250 entries
def getData(baseurl):
    dataList = []
    for i in range(0, 10):                   # 10 pages of 25 movies: start=0, 25, ..., 225
        url = baseurl + str(i * 25)
        html = askUrl(url)
        # Parse the page
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all("div", class_="item"):   # one div.item per movie
            # print(item)
            data = []                        # all fields of one movie
            item = str(item)
            # Extract the fields with the precompiled regexes
            link = re.findall(findLink, item)[0]           # detail-page link
            data.append(link)
            imgSrc = re.findall(findImgSrc, item)[0]
            data.append(imgSrc)
            titles = re.findall(findTitle, item)           # may contain only a Chinese title
            if len(titles) == 2:
                ctitle = titles[0]
                data.append(ctitle)                        # Chinese title
                otitle = titles[1].replace('/', '')        # strip the leading '/'
                data.append(otitle)                        # foreign title
            else:
                data.append(titles[0])                     # Chinese title
                data.append(' ')                           # leave the foreign title blank
            rating = re.findall(findRating, item)[0]
            data.append(rating)
            judgeNum = re.findall(findJudge, item)[0]
            data.append(judgeNum)
            inq = re.findall(findInq, item)                # the summary may be missing
            if len(inq) != 0:
                inq = inq[0].replace('。', '')             # drop the trailing full stop
                data.append(inq)
            else:
                data.append(' ')
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', ' ', bd)     # replace <br/> tags with spaces
            data.append(bd.strip())                        # trim surrounding whitespace
            # print(link)
            dataList.append(data)
    return dataList


# Save the data to an .xls workbook
def saveData(dbpath, datalist):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)     # create the Workbook object
    sheet = book.add_sheet('豆瓣电影250', cell_overwrite_ok=True)    # create the sheet
    # sheet.write(0, 0, 'hello')  # row, column, value
    col = ("电影详情连接", "图片连接", "影片中文名", "影片外文名", "评分", "评价人数", "概况", "相关信息")
    for i in range(0, 8):
        sheet.write(0, i, col[i])            # header row
    for i in range(0, 250):
        # print("row %d" % i)
        data = datalist[i]
        for j in range(0, 8):
            sheet.write(i + 1, j, data[j])
    book.save(dbpath)


if __name__ == "__main__":
    main()
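To see how the field regexes behave before running the full crawl, here is a minimal sketch against a hypothetical, heavily trimmed stand-in for one str(item) block (the real Douban markup has more attributes and nesting; the snippet below is an illustrative assumption, not the actual page source):

# -*- coding: utf-8 -*-
import re

# Hypothetical, simplified stand-in for one str(item) block -- NOT the real Douban markup
sample = '''<div class="item">
<a href="https://movie.douban.com/subject/1292052/">
<img src="https://img.example/poster.jpg"></a>
<span class="title">肖申克的救赎</span>
<span class="title">/ The Shawshank Redemption</span>
<span class="rating_num" property="v:average">9.7</span>
<span>2000000人评价</span>
<span class="inq">希望让人自由。</span>
</div>'''

findLink = re.compile(r'<a href="(.*?)">')
findTitle = re.compile(r'<span class="title">(.*?)</span>')
findJudge = re.compile(r'<span>(\d*)人评价</span>')

print(re.findall(findLink, sample)[0])   # https://movie.douban.com/subject/1292052/
print(re.findall(findTitle, sample))     # ['肖申克的救赎', '/ The Shawshank Redemption']
print(re.findall(findJudge, sample)[0])  # 2000000

Note that findTitle returns two entries when a foreign title is present, which is exactly the case the len(titles) == 2 branch in getData handles.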
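Once the script finishes, the workbook can be spot-checked from Python. A minimal sketch, assuming the xlrd package is installed (xlrd reads the legacy .xls format that xlwt writes):

import xlrd

book = xlrd.open_workbook("豆瓣电影250.xls")
sheet = book.sheet_by_index(0)
print(sheet.nrows)          # expect 251: one header row plus 250 movies
print(sheet.row_values(0))  # the header row written by saveData
print(sheet.row_values(1))  # the fields of the top-ranked movie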
