Python Learning Notes (5): Scraping a Page of Product Listings

My Code

from bs4 import BeautifulSoup
import requests
import time


# collect the product links from the listing pages
def get_links_from(who_sells):
    links = []
    urls = ['http://bj.58.com/pbdn/{}/pn{}/'.format(who_sells, i) for i in range(1, 10)]
    for single_url in urls:
        wb_data = requests.get(single_url)
        time.sleep(5)     # throttle requests so the site's anti-scraping measures are not triggered
        if wb_data.status_code == 200:
            soup = BeautifulSoup(wb_data.text, 'lxml')
            for link in soup.select('td.t a.t'):
                links.append(link.get('href'))
    return links

def get_detail(who_sells=0):
    urls = get_links_from(who_sells)
    for url in urls:
        wb_data = requests.get(url)
        soup = BeautifulSoup(wb_data.text, 'lxml')

        # use the search box in the browser's element inspector to locate elements quickly and keep the selectors short
        catalogs = soup.select('div > span > a')
        titles = soup.select('.info_titile')
        prices = soup.select('.price_now i')
        tags = soup.select('.biaoqian_li')
        areas = soup.select('.palce_li span i')
        views = soup.select('.look_time')

        # each select call returns a single-element list here, so the values could also be taken directly, e.g. prices[0]
        for catalog, title, price, tag, area, view in zip(catalogs, titles, prices, tags, areas, views):
            data = {
                'catalog': catalog.get_text(),
                'title': title.get_text(),
                'price': price.get_text(),
                'tag': tag.get_text(),
                'area': area.get_text(),
                'view': view.get_text(),
            }
            print(data)

# scrape different listings depending on the argument: 0 = personal seller (default), 1 = merchant
get_detail()
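
On top of the time.sleep(5) throttle in the code above, a common extra precaution (my own addition, not part of the original exercise) is to send a browser-like User-Agent header and set a request timeout; a minimal sketch:

import requests

headers = {'User-Agent': 'Mozilla/5.0'}   # pretend to be a regular browser; the header value here is just an example
wb_data = requests.get('http://bj.58.com/pbdn/0/pn1/', headers=headers, timeout=10)
print(wb_data.status_code)                # 200 means the page came back normally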

Summary

  • The select method returns a list (see the sketch below)
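
A minimal sketch of this point, using a made-up HTML snippet (not taken from the 58.com page):

from bs4 import BeautifulSoup

html = '<span class="price_now"><i>128</i></span>'   # toy markup for illustration only
soup = BeautifulSoup(html, 'lxml')
prices = soup.select('.price_now i')
print(prices)                # [<i>128</i>]  -- a list, even when there is only one match
print(prices[0].get_text())  # '128'         -- index into the list first, then call get_text()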
