Scraping Taobao with Selenium

Goal: crawl Taobao search results for food ('美食').

Workflow:
1. Search for the keyword: drive the browser with Selenium, submit the search, and get the first page of product listings.
2. Read the total page count from the pager, then simulate page turns to fetch the remaining pages of listings.
3. Parse each page's source with PyQuery to extract the product details.
4. Save the results to MongoDB (connection settings come from a config module; a sketch follows below).
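
The script imports MONGO_URL, MONGO_DB, and MONGO_TABLE via `from config import *`. The original config module is not shown; a minimal sketch, with placeholder values you would adapt to your own environment:

# config.py -- hypothetical settings module assumed by the crawler below
MONGO_URL = 'localhost'    # MongoDB host or full connection URI (placeholder)
MONGO_DB = 'taobao'        # database name (placeholder)
MONGO_TABLE = 'product'    # collection name (placeholder)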

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
from pyquery import PyQuery as pq
from config import *
import pymongo
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]

chrome_driver = r"E:\Googedownload\chromedriver_win32\chromedriver.exe"  # path to the local chromedriver binary
browser = webdriver.Chrome(executable_path=chrome_driver)  # Selenium 3 style; see the Selenium 4 note below
wait = WebDriverWait(browser, 10)  # wait up to 10 seconds for elements to appear

def search():  # fetch the first page of search results
    try:
        browser.get('https://www.taobao.com')
        input_box = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#q'))
        )
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')))
        input_box.send_keys('美食')  # search keyword: '美食' (food)
        submit.click()
        total = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.total')))
        get_products()
        return total.text  # pager text containing the total page count
    except TimeoutException:
        return search()  # retry on timeout
def next_page(page_number):  # jump to the given page via the pager's input box
    try:
        input_box = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input'))
        )
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))
        input_box.clear()
        input_box.send_keys(page_number)
        submit.click()
        # wait until the highlighted page number matches the one we requested
        wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'), str(page_number)))
        get_products()
    except TimeoutException:
        next_page(page_number)  # retry on timeout
def get_products():  # extract product details from the current page
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
    html = browser.page_source
    doc = pq(html)
    items = doc('#mainsrp-itemlist .items .item').items()  # .items() yields every matched node
    for item in items:
        product = {
            'image': item.find('.pic .img').attr('src'),
            'price': item.find('.price').text(),
            'deal': item.find('.deal-cnt').text()[:-3],  # strip the trailing '人付款' suffix
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text()
        }
        print(product)
        save_to_mongo(product)
def save_to_mongo(result):  # result is the product dict built above
    try:
        if db[MONGO_TABLE].insert_one(result):  # insert_one replaces the deprecated insert()
            print('saved to MongoDB', result)
    except Exception:
        print('failed to save', result)
def main():
    try:
        total = search()
        total = int(re.compile(r'(\d+)').search(total).group(1))  # pull the page count out of the pager text
        for i in range(2, total + 1):
            next_page(i)
    except Exception:
        print('something went wrong')
    finally:
        browser.close()
if __name__ == '__main__':
    main()
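
Note: Selenium 4 removed the executable_path argument used above. If you are running a recent Selenium, a minimal sketch of the equivalent setup (the driver path is still an assumption about your machine):

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

# Selenium 4 style: wrap the driver path in a Service object
service = Service(r"E:\Googedownload\chromedriver_win32\chromedriver.exe")
options = webdriver.ChromeOptions()
# options.add_argument('--headless')  # optional: run without opening a browser window
browser = webdriver.Chrome(service=service, options=options)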
