爬虫小案例——古诗文网《史记》全册（xpath 解析）

本案例爬取的网址为古诗文网《史记》全文的目录页。

import time
import requests
# from multiprocessing.dummy import Pool
from lxml import etree


def search(url, headers=None):
    """Fetch one chapter detail page and save its title and body text to a local file.

    Args:
        url: Detail-page URL of a single chapter.
        headers: Optional HTTP request headers. Defaults to the same
            browser User-Agent used in ``__main__``; previously this
            function read a global ``headers`` defined only under
            ``if __name__ == '__main__'``, which raised NameError when
            the module was imported.
    """
    if headers is None:
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'
        }
    index_page = requests.get(url=url, headers=headers).text
    tree = etree.HTML(index_page)
    # Chapter body: all paragraph text under <div class="contson">.
    detail_page = "".join(tree.xpath('//*[@class="contson"]/p/text()'))
    # Chapter title: <div class="cont"><h1><span><b>title</b></span></h1>.
    detail_name = "".join(tree.xpath('//*[@class="cont"]/h1/span/b/text()'))
    detail_all = detail_name + '\n' + detail_page
    # NOTE(review): if the title xpath matches nothing, this becomes './' and
    # open() will fail — confirm the page structure before relying on it.
    fileName = './' + detail_name
    print(url, detail_name)
    with open(fileName, 'w', encoding='utf-8') as fp:
        fp.write(detail_all)



if __name__ == '__main__':

    start_time = time.time()
    # Table-of-contents page of "Records of the Grand Historian" (Shiji).
    url = 'https://so.gushiwen.cn/guwen/book_46653FD803893E4F9B29D6AEC1BFD4EA.aspx'
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'
    }
    page_1 = requests.get(url=url, headers=headers).text
    tree = etree.HTML(page_1)
    # Collect every chapter link from the index page.
    urls = tree.xpath('//*[@id="html"]/body/div[2]/div[1]/div[3]/div/div[2]/span/a/@href')
    # print(urls)
    # pool = Pool(20)               # open 20 worker threads
    # pool.map(search, urls)        # multithreaded run takes about 1 s
    for url in urls:                # single-threaded run takes about 12-15 s or longer
        search(url)
    end_time = time.time()
    # BUG FIX: the original concatenated str + float ("花费时长为" + end_time-start_time),
    # which raises TypeError; format the elapsed seconds via an f-string instead.
    print(f"花费时长为{end_time - start_time}")

 

你可能感兴趣的：python爬虫、爬虫、html、前端