# Flexibly scrape dynamic product data from VIP.com (唯品会) based on a search keyword.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pq
import time
import random

# On Windows a local chromedriver binary is required at this path.
# NOTE(review): this is the Selenium 3 positional `executable_path` style;
# Selenium 4 removed it in favour of `webdriver.Chrome(service=Service(path))` — confirm the installed version.
browser = webdriver.Chrome(r"C:\Users\dell\AppData\Local\Google\Chrome\Application\chromedriver.exe")
# Maximize the window so fixed overlays/frames do not cover elements
# (avoids click-interception timeouts when paginating; see VIP.search).
browser.maximize_window()
# Search keyword to scrape ("手机" = mobile phone); may be any keyword.
KEYWORD = "手机"


class VIP(object):
    """Scraper for vip.com search results.

    Uses the module-level Selenium ``browser`` and search keyword
    ``KEYWORD``; results are appended, one dict repr per line, to
    ``vip_iPhone.csv``.
    """

    def search(self):
        """Open vip.com, search for KEYWORD, and scroll the result page.

        Waits for the search box and button, types the keyword, clicks
        search, then scrolls to the bottom so lazily-loaded items render.

        :return: None
        """
        url = "https://www.vip.com/"
        browser.get(url)
        wait = WebDriverWait(browser, 5)
        # Wait for the search box to be present in the DOM.
        # (renamed from `input`, which shadowed the builtin)
        search_box = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, ".c-search-input")))
        # Wait until the search button is clickable.
        # An alternative would be to send an ENTER keystroke instead.
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".J-search-button")))
        time.sleep(random.randint(1, 4))
        # Clear any pre-filled content, then type the keyword.
        search_box.clear()
        search_box.send_keys(KEYWORD)
        time.sleep(random.randint(1, 4))
        # Simulate a human click on the search button.
        submit.click()
        time.sleep(random.randint(1, 4))
        # Scroll the result page to the bottom in small steps.
        # A single jump (`window.scrollTo(0, document.body.scrollHeight)`)
        # can leave a fixed frame covering the "next page" control and
        # cause a click timeout; maximizing the window (module level)
        # also mitigates that overlap.
        self.scroll()
        time.sleep(random.randint(2, 4))

    def scroll(self):
        """Smooth-scroll the page to the bottom via injected JavaScript.

        Steps 100px every 50ms until the bottom is reached, then appends
        "scroll-done" to the document title as a completion marker.

        :return: None
        """
        browser.execute_script(""" 
            (function () { 
                var y = document.body.scrollTop; 
                var step = 100; 
                window.scroll(0, y); 
                function f() { 
                    if (y < document.body.scrollHeight) { 
                        y += step; 
                        window.scroll(0, y); 
                        setTimeout(f, 50); 
                    }
                    else { 
                        window.scroll(0, y); 
                        document.title += "scroll-done"; 
                    } 
                } 
                setTimeout(f, 1000); 
            })(); 
            """)

    def getData(self):
        """Parse the current page source and persist each product found.

        Extracts URL, discount price, VIP price, discount and title from
        every ``.goods-list-item`` node, writes each record via
        :meth:`write`, and echoes it to stdout.

        :return: None
        """
        html = browser.page_source
        doc = pq(html)
        items = doc(".goods-list-item").items()
        for index, item in enumerate(items):
            product = {
                # NOTE(review): if the href attribute is missing this yields
                # "http:None" — confirm selector against the live page.
                "URL:": "http:{}".format(str(item(".goods-image a").attr("href"))),
                "DISCOUNT_PRICE:": item(".inner-exclusive").text(),
                "VIP_PRICE:": item(".goods-vipshop-wrap").text(),
                "DISCOUNT:": item(".goods-discount-wrap").text().replace("\n", " "),
                "TITLE:": item(".goods-title-info").text()
            }
            self.write(product)
            print(index, product)
            time.sleep(random.randint(1, 4))

    def write(self, content):
        """Append one record to the local results file.

        :param content: product dict (or any object); its ``str()`` repr
            is written as a single line.
        :return: None
        """
        with open(r"vip_iPhone.csv", 'a+', encoding="utf-8") as file:
            file.write(str(content) + "\n")

    def nextPage(self):
        """Click the "next page" control and report the new URL.

        Raises a selenium ``TimeoutException`` if the control does not
        become clickable within 5 seconds (e.g. on the last page).

        :return: None
        """
        wait = WebDriverWait(browser, 5)
        # Wait for the next-page button to be clickable, then click it.
        # (renamed from the original typo `sumbit`)
        next_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".cat-paging-next")))
        time.sleep(4)
        next_button.click()
        # current_url now reflects the page we just navigated to.
        current_url = browser.current_url
        print(current_url)

    def execute(self, pages=19):
        """Run the full scrape: search, then paginate and collect data.

        :param pages: total number of result pages to scrape; the first
            page plus ``pages - 1`` pagination clicks. Defaults to 19 to
            match the original hard-coded product page count.
        :return: None
        """
        # Perform the initial search (lands on page 1).
        self.search()
        # Collect data from the first result page.
        self.getData()
        # Then advance through the remaining pages.
        for _ in range(1, pages):
            self.nextPage()
            self.getData()


if __name__ == "__main__":
    # Entry point: build the scraper and run the full search/paginate flow.
    VIP().execute()

 

# (blog footer residue) You may also be interested in: (python)