python爬虫实战之爬取成语大全

工作之余想学习中华文化之成语,身边没有成语词典,网上一个个翻网页又比较懒,花了半个小时搞定数字成语词典,你值得拥有!

爬取思路

  • 找到首页网址:https://www.chengyucidian.net/
  • 按照拼音字母种类抓取,注意有些字母没有成语;
  • 获取每个字母种类的每一页所有成语链接
  • 获取每一页链接下的内容

废话不多说,直接上代码给各位看客拿去上手撸!

import requests
from bs4 import BeautifulSoup
import re


# Desktop Safari User-Agent sent with every request so the site serves
# normal pages instead of rejecting the default python-requests UA.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.2 Safari/605.1.15'
}


def getChengyu(cate, page):
    """Fetch one listing page of idioms for pinyin category *cate* and append
    every idiom detail-page URL found there to chengyu_urls.csv.

    cate: pinyin letter category (e.g. "a", "b", ...).
    page: 1-based page number within that category.
    """
    listing_url = "https://www.chengyucidian.net/letter/" + str(cate) + "/p/" + str(page)
    res = requests.get(listing_url, headers=headers, allow_redirects=False)
    res.encoding = "utf-8"
    # Explicit parser: avoids bs4's "no parser specified" warning and makes
    # parsing independent of which optional parsers are installed.
    soup = BeautifulSoup(res.text, "html.parser")
    cate_divs = soup.select('div[class="cate"]')
    print(cate_divs)
    # The listing markup embeds each idiom's numeric id in its link;
    # pull out every run of digits. Raw string for the regex (was '\d+').
    ids = re.findall(r'\d+', str(cate_divs))
    # 'with' guarantees the file is closed; the original leaked the handle.
    with open("chengyu_urls.csv", "a", encoding="utf-8") as f:
        for num in ids:
            f.write("https://www.chengyucidian.net/cy/" + str(num) + ".html" + "\n")

def getPageNum(cate):
    """Return the last (highest) page number shown in the pagination block
    of the listing page for pinyin category *cate*, as a digit string.

    Raises IndexError when the page has no pagination digits — e.g. the
    letters with no idioms mentioned in the article intro. TODO confirm
    whether callers want that treated as page count 1 instead.
    """
    res = requests.get("https://www.chengyucidian.net/letter/" + str(cate),
                       headers=headers, allow_redirects=False)
    res.encoding = "utf-8"
    # Explicit parser for deterministic behaviour (see getChengyu).
    soup = BeautifulSoup(res.text, "html.parser")
    page_div = soup.select('div[class="page"]')
    # The final run of digits in the pagination HTML is the last page number.
    return re.findall(r'\d+', str(page_div))[-1]

def getIntroduction(url):
    """Fetch one idiom detail page and return (idiom, introduction) as
    plain-text strings with markup and whitespace noise removed.

    url: detail-page address such as https://www.chengyucidian.net/cy/123.html

    NOTE(review): the pasted original was garbled here — it stripped tags and
    newlines with a long chain of str.replace() calls. get_text() performs the
    same tag removal robustly; the whitespace replaces are kept explicitly.
    """
    res = requests.get(url, headers=headers, allow_redirects=False)
    res.encoding = "utf-8"
    soup = BeautifulSoup(res.text, "html.parser")
    # <h1> holds the idiom itself.
    chengyu = soup.select('h1')[0].get_text()
    chengyu = chengyu.replace("\r", "").replace("\n", "").strip()
    print(chengyu)
    # div.con holds the explanation / usage text.
    introText = soup.select('div[class="con"]')[0].get_text()
    introText = introText.replace("\r", "").replace("\n", "").replace("\t", "").strip()
    print(introText)
    return chengyu, introText


if __name__ == "__main__":
    # Read the URL list produced by getChengyu(); 'with' closes both files
    # (the original left both handles open).
    with open("chengyu_urls.csv", "r", encoding="utf-8") as f1:
        urls = f1.read().split("\n")
    print(len(urls))
    with open("chengyu_introText.csv", "a", encoding="utf-8") as f2:
        # The original resumed from index 10 (presumably a restarted crawl) —
        # preserved here. Skip blank entries left by the trailing newline.
        for url in urls[10:]:
            if not url:
                continue
            chengyu, introText = getIntroduction(url)
            f2.write(str(chengyu) + "\n" + "[" + str(introText) + "]" + "\n")

你可能感兴趣的:(Python爬虫,python,爬虫,后端)