python操作JSON数据
Xpath知识点讲解
爬虫相关知识点
Requests库的使用
优雅使用字符串
HTML、URL、DOM树
OOP思想优化代码
可变对象vs不可变对象
图书标题、价格、店铺信息、购买地址=>比价
import requests
from lxml import html
def spider(sn, book_list=None):
    """Scrape book listings from dangdang.com for an ISBN/keyword.

    :param sn: ISBN or search keyword.
    :param book_list: optional list that result dicts are appended to;
        a fresh list is created when omitted.
    :return: the list of ``{'title', 'price', 'link', 'store'}`` dicts.
    """
    # None sentinel instead of a mutable default argument — the original
    # `book_list=[]` was shared across calls and silently accumulated results.
    if book_list is None:
        book_list = []
    url = 'http://search.dangdang.com/?key={sn}&act=input'.format(sn=sn)
    # Fetch the raw HTML
    html_data = requests.get(url).text
    # Build the xpath selector
    selector = html.fromstring(html_data)
    # Locate the book result items
    ul_list = selector.xpath('//div[@id="search_nature_rg"]/ul/li')
    print(len(ul_list))
    for li in ul_list:
        # Title
        title = li.xpath('a/@title')
        print(title[0])
        # Purchase link
        link = li.xpath('a/@href')
        print(link[0])
        # Price (strip the currency sign)
        price = li.xpath('p[@class="price"]/span[@class="search_now_price"]/text()')
        print(price[0].replace('¥', ''))
        # Store — an empty xpath result means the item is sold by dangdang itself
        store = li.xpath('p[@class="search_shangjia"]/a/text()')
        store = '当当自营' if len(store) == 0 else store[0]
        print(store)
        print('-----------------------')
        book_list.append({
            'title': title[0],
            'price': price[0].replace('¥', ''),
            'link': link[0],
            # BUG FIX: `store` is already a plain string here; the original
            # `store[0]` recorded only its first character.
            'store': store,
        })
    return book_list
if __name__ == '__main__':
    # Sample crawl with a fixed ISBN when run as a script.
    spider('9787115428028')
import requests
from lxml import html
def spider(sn, book_list=None):
    """Scrape book listings from JD.com for an ISBN/keyword.

    :param sn: ISBN or search keyword.
    :param book_list: optional list that result dicts are appended to;
        a fresh list is created when omitted.
    :return: the list of ``{'title', 'price', 'link', 'store'}`` dicts.
    """
    # None sentinel instead of a mutable default argument — the original
    # `book_list=[]` was shared across calls and silently accumulated results.
    if book_list is None:
        book_list = []
    url = 'https://search.jd.com/Search?keyword={0}'.format(sn)
    # Fetch the HTML document
    resp = requests.get(url)
    print(resp.encoding)
    # Force utf-8 so Chinese text decodes correctly regardless of what
    # requests guessed from the response headers.
    resp.encoding = 'utf-8'
    html_doc = resp.text
    # Build the xpath selector
    selector = html.fromstring(html_doc)
    # Locate the goods list items
    ul_list = selector.xpath('//div[@id="J_goodsList"]/ul/li')
    print(len(ul_list))
    # Extract title, price, link and store for every item
    for li in ul_list:
        # Title
        title = li.xpath('div/div[@class="p-name"]/a/@title')
        print(title[0])
        # Purchase link
        link = li.xpath('div/div[@class="p-name"]/a/@href')
        print(link[0])
        # Price
        price = li.xpath('div/div[@class="p-price"]/strong/i/text()')
        print(price[0])
        # Store
        store = li.xpath('div//a[@class="curr-shop"]/@title')
        print(store[0])
        book_list.append({
            'title': title[0],
            'price': price[0],
            'link': link[0],
            'store': store[0]
        })
    return book_list
if __name__ == '__main__':
    # Sample crawl with a fixed ISBN when run as a script.
    sn = '9787115428028'
    spider(sn)
import requests
from lxml import html
def spider(sn, book_list=None):
    """Scrape book listings from yhd.com (1号店) for an ISBN/keyword.

    :param sn: ISBN or search keyword.
    :param book_list: optional list that result dicts are appended to;
        a fresh list is created when omitted.
    :return: the list of ``{'title', 'price', 'link', 'store'}`` dicts.
    """
    # None sentinel instead of a mutable default argument — the original
    # `book_list=[]` was shared across calls and silently accumulated results.
    if book_list is None:
        book_list = []
    url = 'https://search.yhd.com/c0-0/k{0}/'.format(sn)
    # Fetch the raw HTML
    html_doc = requests.get(url).text
    # Build the xpath selector
    selector = html.fromstring(html_doc)
    # Locate the book result items
    ul_list = selector.xpath('//div[@id="itemSearchList"]/div')
    print(len(ul_list))
    # Extract the fields for every item
    for li in ul_list:
        # Title
        title = li.xpath('div/p[@class="proName clearfix"]/a/@title')
        print(title[0])
        # Price (taken from the yhdprice attribute)
        price = li.xpath('div//p[@class="proPrice"]/em/@yhdprice')
        print(price[0])
        # Purchase link
        link = li.xpath('div/p[@class="proName clearfix"]/a/@href')
        print(link[0])
        # Store — may be absent for some items
        store = li.xpath('div/p[@class="storeName limit_width"]/a/@title')
        print(store)
        book_list.append({
            'title': title[0],
            'price': price[0],
            'link': link[0],
            # BUG FIX: guard against a missing store node — the original
            # unconditional `store[0]` raised IndexError on an empty result.
            'store': store[0] if store else ''
        })
    return book_list
if __name__ == '__main__':
    # Sample crawl with a fixed ISBN when run as a script.
    sn = '9787115428028'
    spider(sn)
import requests
def spider(sn, book_list=None):
    """Scrape book listings from Taobao's customized-search JSON API.

    :param sn: ISBN or search keyword.
    :param book_list: optional list that result dicts are appended to;
        a fresh list is created when omitted.
    :return: the list of ``{'title', 'price', 'link', 'store'}`` dicts.
    """
    # None sentinel instead of a mutable default argument — the original
    # `book_list=[]` was shared across calls and silently accumulated results.
    if book_list is None:
        book_list = []
    url = 'https://s.taobao.com/api?ajax=true&m=customized&sourceId=tb.index&q={0}'.format(sn)
    # The endpoint returns JSON directly
    rest = requests.get(url).json()
    print(rest)
    # Auction entries live under this nested key path in the response
    bk_list = rest["API.CustomizedApi"]["itemlist"]["auctions"]
    print(len(bk_list))
    for bk in bk_list:
        # Title
        title = bk['raw_title']
        price = bk['view_price']
        link = bk['detail_url']
        store = bk['nick']
        print('{title}: {price}: {link}: {store}'.format(
            title=title,
            price=price,
            link=link,
            store=store
        ))
        book_list.append({
            'title': title,
            'price': price,
            'link': link,
            'store': store
        })
    return book_list
if __name__ == '__main__':
    # Sample crawl with a fixed ISBN when run as a script.
    sn = '9787115428028'
    spider(sn)
from chapter01.spider_dangdang import spider as dangdang
from chapter01.spider_jd import spider as jd
from chapter01.spider_yhd import spider as yhd
from chapter01.spider_taobao import spider as taobao
def main(sn):
    """Price-comparison tool: crawl four shops for one ISBN and rank results.

    :param sn: ISBN (or keyword) to look up in every shop.
    """
    book_list = []
    # BUG FIX: the "完成" (done) messages were printed BEFORE each crawl ran,
    # so the progress log was misleading; print them after the call returns.
    # Dangdang
    dangdang(sn, book_list)
    print('当当网数据爬取完成')
    # JD
    jd(sn, book_list)
    print('京东网数据爬取完成')
    # YHD (1号店)
    yhd(sn, book_list)
    print('1号店数据爬取完成')
    # Taobao
    taobao(sn, book_list)
    print('淘宝网数据爬取完成')
    # Dump every collected record
    for book in book_list:
        print(book)
    print('----------------开始排序-----------')
    # Sort by numeric price, most expensive first
    book_list = sorted(book_list, key=lambda item: float(item["price"]), reverse=True)
    for book in book_list:
        print(book)
if __name__ == '__main__':
    # Prompt for the ISBN interactively and run the comparison.
    main(input('请输入ISBN:'))