A general-purpose Taobao crawler with Selenium, saving results to Excel

import re

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from pyquery import PyQuery as pq

import xlwt


# # Headless mode: define a browser without a visible window
# from selenium.webdriver.chrome.options import Options
# chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--disable-gpu')
# browser = webdriver.Chrome(options=chrome_options)

# With a visible window
browser = webdriver.Chrome()

# Even in headless mode a window size must be set, so the page lays out consistently
browser.set_window_size(1400, 900)


# Explicit wait: give up after 10 s if the condition is not met
wait = WebDriverWait(browser, 10)
'''
wait.until() is Selenium's explicit wait. wait is a WebDriverWait object with a
timeout: if the element has not appeared in the DOM within that time, it keeps
polling. In other words, the program re-checks the condition periodically and
moves on as soon as it holds; once the timeout is exceeded, it raises
TimeoutException.
1. presence_of_element_located   -- element is present in the DOM; takes a locator tuple such as (By.ID, 'p')
2. element_to_be_clickable       -- element is clickable
3. text_to_be_present_in_element -- a given element's text contains the given string
'''
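
# A minimal usage sketch of the three conditions above (illustrative only; the
# '#submit' and '.status' selectors are made up, not taken from Taobao's page):
# el  = wait.until(EC.presence_of_element_located((By.ID, 'p')))
# btn = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#submit')))
# wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, '.status'), 'done'))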


# Excel setup: one workbook, one sheet, header row at row 0
excel_book = xlwt.Workbook(encoding='utf-8')
sheet = excel_book.add_sheet('test', cell_overwrite_ok=True)
sheet.write(0, 0, 'image')
sheet.write(0, 1, 'price')
sheet.write(0, 2, 'deal')
sheet.write(0, 3, 'title')
sheet.write(0, 4, 'shop')
sheet.write(0, 5, 'location')
# Next row to write; module-level so rows keep accumulating across pages
row = 1
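
# To sanity-check the saved file afterwards, the xlrd package can read .xls
# workbooks (a separate, optional step -- not part of the crawler itself):
# import xlrd
# wb = xlrd.open_workbook('test.xls')
# ws = wb.sheet_by_name('test')
# print(ws.row_values(0))  # -> ['image', 'price', 'deal', 'title', 'shop', 'location']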



def search():
    print('Searching...')
    try:
        browser.get('https://www.taobao.com/')
        # The search box on the Taobao home page
        search_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#q')))
        # The search button next to it
        submit = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')))

        # Type the keyword (costume rental) and run the search
        search_input.send_keys('演出服装租赁')
        submit.click()
        # # Optionally click the "sort by sales" tab instead of the default order:
        # submit2 = wait.until(EC.element_to_be_clickable(
        #     (By.CSS_SELECTOR, '#J_relative > div.sort-row > div > ul > li:nth-child(3) > a')))
        # submit2.click()

        # The element showing the total number of result pages
        total = wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.total')))
        # Scrape the products on the first result page
        get_product()
        # Return the "共 xx 页," text for main() to parse
        return total.text
    except TimeoutException:
        # Retry the whole search on timeout
        return search()

def next_page(page_num):
    print('Turning to page', page_num)
    try:
        # The page-number input box at the bottom of the result list
        page_input = wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input')))
        # The confirm button next to it
        submit = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))
        # Clear the old page number, type the new one, and jump
        page_input.clear()
        page_input.send_keys(str(page_num))
        submit.click()
        # Confirm the highlighted page number is the one we asked for
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'),
            str(page_num)))
        # Scrape this page's products
        get_product(page_num)
    except TimeoutException:
        # On timeout, retry the same page
        next_page(page_num)

def get_product(page_num=1):
    global row
    # Wait until at least one product node has loaded before grabbing the source
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
    # page_source is the full HTML of the rendered page
    html = browser.page_source
    # Parse it with pyquery and iterate over the product nodes
    doc = pq(html)
    items = doc('#mainsrp-itemlist .items .item').items()
    for item in items:
        sheet.write(row, 0, item.find('.pic .img').attr('src'))
        sheet.write(row, 1, item.find('.price').text())
        sheet.write(row, 2, item.find('.deal-cnt').text()[:-3])  # strip the trailing '人付款'
        sheet.write(row, 3, item.find('.title').text())
        sheet.write(row, 4, item.find('.shop').text())
        sheet.write(row, 5, item.find('.location').text())
        row += 1
    # Save once per page as a checkpoint (xlwt rewrites the whole file on each save)
    excel_book.save('test.xls')
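
# A standalone sketch of the pyquery extraction pattern used above (the HTML
# here is made up, just to show the .items()/.find() calls):
# from pyquery import PyQuery as pq
# demo = pq('<div class="item"><div class="price">99.00</div>'
#           '<div class="title">demo</div></div>')
# for it in demo('.item').items():
#     print(it.find('.price').text(), it.find('.title').text())  # -> 99.00 demo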


def main():
    try:
        # Step 1: run the search and get the "total pages" text
        total = search()
        # Extract the integer page count, e.g. '共 100 页,' -> 100
        total = int(re.search(r'(\d+)', total).group(1))
        # Step 2: page through the remaining results
        for i in range(2, total + 1):
            next_page(i)
    except Exception as e:
        print('Error:', e)


    # finally:
    #     # Close the browser when done
    #     browser.close()


if __name__ == '__main__':
    main()

