# get_goods_from_taobao

import requests

import re

import xlsxwriter

 

cok=''  # paste your own cookie string here, copied from the browser after logging in to Taobao

# Fetch a page
def getHTMLText(url):
    """Return the decoded HTML text of *url*, or '' on any request failure.

    The module-level ``cok`` cookie string ("name=value; name2=value2") is
    parsed into a dict and sent with the request, since Taobao search
    requires a logged-in session.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
    cookies = {}
    for pair in cok.split(';'):
        pair = pair.strip()
        # Skip empty fragments / malformed pairs: the original unpacked
        # unconditionally and raised ValueError when cok was '' (the default).
        if '=' not in pair:
            continue
        name, value = pair.split('=', 1)
        cookies[name] = value
    try:
        r = requests.get(url, cookies=cookies, headers=headers, timeout=60)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Network/HTTP failure -> empty page; callers treat '' as "no data".
        # (Narrowed from a bare except that also hid programming errors.)
        return ''

#  Parse the page, extract the data
def parsePage(ilt, html):
    """Append [price, title] pairs found in *html* to the list *ilt*.

    Taobao embeds results as JSON-ish text such as "view_price":"12.50"
    and "raw_title":"...". Both values are kept as strings.
    """
    print('爬取成功')
    prices = re.findall(r'"view_price":"[\d.]*"', html)
    titles = re.findall(r'"raw_title":".*?"', html)
    # zip() pairs the two lists safely even if their lengths differ.
    # split(':', 1) + strip('"') replaces the original eval() calls, which
    # executed scraped web text as Python code (code-injection risk) and
    # broke on titles containing ':' (split without maxsplit produced an
    # unparseable fragment, silently aborting the loop).
    for p, t in zip(prices, titles):
        price = p.split(':', 1)[1].strip('"')
        title = t.split(':', 1)[1].strip('"')
        ilt.append([price, title])

#  Print the goods list as an aligned table: index, price, title
def printGoodList(ilt):
    """Print *ilt* ([[price, title], ...]) as a tab-separated table."""
    row_fmt = '{:4}\t{:8}\t{:16}'
    print(row_fmt.format('序号', '价格', '名称'))
    # enumerate(start=1) replaces the original hand-maintained counter.
    for idx, item in enumerate(ilt, start=1):
        print(row_fmt.format(idx, item[0], item[1]))

#  Write the scraped goods into an Excel workbook (淘宝数据.xlsx)
def writetoexcel(goods):
    """Save *goods* ([[price, title], ...]) to 淘宝数据.xlsx.

    Column layout: 序号 (1-based index), 名称 (title), 价格 (price).
    The original parameter was named ``list``, shadowing the builtin.
    """
    print('开始创建excel表格')
    book = xlsxwriter.Workbook(u'淘宝数据.xlsx')
    sheet = book.add_worksheet()
    sheet.write(0, 0, '序号')
    sheet.write(0, 1, '名称')
    sheet.write(0, 2, '价格')
    # enumerate(start=1) gives the 1-based row number directly, replacing the
    # two separate counters (row and index+1) that always held the same value.
    for row, (price, title) in enumerate(goods, start=1):
        print('写入第%s行数据' % row)
        sheet.write(row, 0, row)    # 序号
        sheet.write(row, 1, title)  # 名称
        sheet.write(row, 2, price)  # 价格
    print('写入完成')
    book.close()  # flush and release the workbook file

 

 

def main():
    """Drive the scrape: read a query, fetch `depth` result pages,
    print the goods table and save it to Excel.

    Fixes over the original: a pasted JavaScript fragment (a Python
    syntax error) was removed, and .strip() now applies to the user's
    input rather than to the prompt string literal.
    """
    goods = input('请输入想查询的内容:').strip()  # search keyword
    depth = 3  # number of result pages to crawl
    start_url = 'http://s.taobao.com/search?q=' + goods  # search endpoint
    infoList = []
    for i in range(depth):
        try:
            page = i + 1
            print('正在爬取第%s页数据' % page)
            # Taobao paginates in steps of 44 items via the &s= offset.
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            parsePage(infoList, html)
        except Exception:
            # One failed page should not abort the whole scrape.
            # (Narrowed from a bare except.)
            continue
    printGoodList(infoList)
    writetoexcel(infoList)


# Guard the entry point so importing this module does not start a scrape.
if __name__ == '__main__':
    main()