Target URL: http://gz.58.com/sale.shtml
Information scraped: title, price, area, view count, want-to-buy count, item description, number of comments
Scraping method: go into each category's detail pages and parse them with lxml.
Storage: MongoDB & MySQL
-
Get the URL of each Zhuanzhuan category.
-
Open a category page, taking second-hand phones as an example. The category link is http://gz.58.com//shouji/; after clicking through a few pages, the URL pattern for this category's personal (non-merchant) listings turns out to be http://gz.58.com//shouji/0/pn{}/, where putting a number in place of {} turns the page.
However, promoted merchant listings still show up at the top of each page; we will see later whether they can be filtered out during the crawl.
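As a quick check of that pattern, here is a minimal sketch for generating the listing-page URLs of one category (the 100-page range matches what main.py uses later):
category_url = "http://gz.58.com/shouji/"
for page in range(1, 101):
    list_url = '{}0/pn{}/'.format(category_url, page)   # e.g. http://gz.58.com/shouji/0/pn1/
    print(list_url)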
-
Information scraped from each detail page: title, view count, want-to-buy count, price, area, item description, and number of comments.
Create a new folder on your computer, then create three .py files in it:
- class_urls.py: gets the URL of each product category
- page_spider.py: gets the detail page URLs and scrapes the page information
- main.py: the main program
class_urls.py: only the simple second-level categories are used here as a demonstration.
import requests
from lxml import etree

start_url = "http://gz.58.com/sale.shtml"
base_url = "http://gz.58.com"

r = requests.get(start_url)
html = etree.HTML(r.text)
infos = html.xpath('//li[@class="ym-tab"]')
for info in infos:
    urls = info.xpath('span/a/@href')            # simple second-level categories
    # urls = info.xpath('ul/li/span/a/@href')    # nested second-level categories
    for url in urls:
        class_url = base_url + url
        print(class_url)
# After printing the URLs, manually remove the ones that do not follow the page pattern,
# then store the remaining URLs as a string.
## Simple second-level categories, 11 in total.
class_urls1 = '''
http://gz.58.com/shouji/
http://gz.58.com/danche/
http://gz.58.com//diandongche/
http://gz.58.com/diannao/
http://gz.58.com/shuma/
http://gz.58.com/jiadian/
http://gz.58.com/ershoujiaju/
http://gz.58.com/yingyou/
http://gz.58.com/fushi/
http://gz.58.com/meirong/
http://gz.58.com/wenti/
'''
page_spider.py: in the single-process version, return the results through url_list.
In the multiprocessing version, leave url_list out and adjust the way main.py runs accordingly.
import requests
from bs4 import BeautifulSoup
import time
import pymongo

client = pymongo.MongoClient('localhost', 27017)
mydb = client['mydb']
zhuanzhuan_url = mydb['zhuanzhuan_url']
zhuanzhuan_info = mydb['zhuanzhuan_info']

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3294.6 Safari/537.36'}

def get_page(url, page):
    try:
        list_url = '{}0/pn{}/'.format(url, page)
        r = requests.get(list_url, headers=headers)
        soup = BeautifulSoup(r.text, "lxml")
        infos = soup.select("tr")
        # url_list = []  # single-process version
        for info in infos:
            links = info.select("a.t")
            if not links:            # skip table rows without an item link
                continue
            detail_url = links[0].get("href")
            if detail_url.startswith("http://zhuanzhuan.58.com/detail/"):
                # url_list.append(detail_url)  # single-process version
                zhuanzhuan_url.insert_one({'详细页面链接': detail_url})
        # return url_list  # single-process version
    except requests.exceptions.ConnectionError:
        pass

def get_info(url):
    try:
        r = requests.get(url, headers=headers)
        soup = BeautifulSoup(r.text, "lxml")
        title = soup.select("h1.info_titile")[0].text
        price = soup.select("span.price_now i")[0].text
        area = soup.select("div.palce_li span i")[0].text
        view = soup.select("span.look_time")[0].text
        want = soup.select("span.want_person")[0].text
        content = soup.select("div.baby_kuang p")[0].text
        left_num = soup.select("h3.box_title_h3 i")[0].text
        info = {'标题': title,
                '价格': price,
                '区域': area,
                '浏览量': view,
                '想买数': want,
                '宝贝描述内容': content,
                '宝贝留言数': left_num,
                '链接': url
                }
        zhuanzhuan_info.insert_one(info)
        time.sleep(2)
    except IndexError:
        pass
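The storage line at the top also mentions MySQL. As a rough sketch only (the database test, the table zhuanzhuan, its columns, and the connection parameters are assumptions, not part of the original code), the same info dict built in get_info() could be written to MySQL with pymysql:
import pymysql

# Assumed connection parameters and table layout; adjust to your own MySQL setup.
conn = pymysql.connect(host='localhost', user='root', password='password',
                       db='test', charset='utf8mb4')
cursor = conn.cursor()

def save_to_mysql(info):
    # Columns mirror the fields collected in get_info(); the table is assumed to exist.
    sql = ('INSERT INTO zhuanzhuan (title, price, area, view, want, content, left_num, url) '
           'VALUES (%s, %s, %s, %s, %s, %s, %s, %s)')
    cursor.execute(sql, (info['标题'], info['价格'], info['区域'], info['浏览量'],
                         info['想买数'], info['宝贝描述内容'], info['宝贝留言数'], info['链接']))
    conn.commit()
Calling save_to_mysql(info) right after zhuanzhuan_info.insert_one(info) would keep both stores in sync.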
main.py (single-process version):
from class_urls import class_urls1
from page_spider import get_page, get_info

# Requires the url_list lines in get_page() to be uncommented so that it returns the links.
url_list = class_urls1.split()
for url in url_list:
    for page in range(1, 101):
        detail_url_list = get_page(url, page)
        for detail_url in detail_url_list:
            get_info(detail_url)
main.py (multiprocessing version):
- ① First collect the detail page links:
from multiprocessing import Pool
from class_urls import class_urls1
from page_spider import get_page, get_info

def get_links_from(url):
    for page in range(1, 101):
        get_page(url, page)

if __name__ == '__main__':
    url_list = class_urls1.split()          # turn the string into a list
    pool = Pool(processes=4)                # create a process pool
    pool.map(get_links_from, url_list)      # run the link collection in the pool
- ② Then scrape the Zhuanzhuan item info (supports resuming after interruption):
# multiprocessing
from multiprocessing import Pool
from class_urls import class_urls1
from page_spider import get_page, get_info
from page_spider import zhuanzhuan_url, zhuanzhuan_info

def get_links_from(url):
    for page in range(1, 101):
        get_page(url, page)

zz_urls = [item['详细页面链接'] for item in zhuanzhuan_url.find()]   # detail page links collected in the database
zz_urls_2 = [item['链接'] for item in zhuanzhuan_info.find()]        # links already scraped (an empty list if zhuanzhuan_info does not exist yet)
rest_urls = set(zz_urls) - set(zz_urls_2)                            # the set difference drops finished links, enabling resume after interruption

if __name__ == '__main__':
    url_list = class_urls1.split()            # turn the string into a list
    ## pool = Pool(processes=4)               # create a process pool
    ## pool.map(get_links_from, url_list)     # run the link collection (step ①)
    pool = Pool(processes=4)                  # create a process pool
    pool.map(get_info, rest_urls)             # scrape the remaining detail pages in the pool
Because of network hiccups and other issues, scraping the item info does not always go smoothly, so it is worth adding a few more anti-scraping countermeasures. Collecting the detail page links, on the other hand, goes fairly smoothly, so run that step once and then comment it out.
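One hedged sketch of such measures, assuming randomised delays and a rotating User-Agent (the user-agent strings and delay range below are illustrative assumptions):
import random
import time

# A small pool of user-agent strings to rotate through; extend with real browser UAs as needed.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3294.6 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36',
]

def random_headers():
    # Pick a different User-Agent for each request.
    return {'user-agent': random.choice(USER_AGENTS)}

def polite_sleep(low=1, high=4):
    # Random delay instead of a fixed 2 seconds.
    time.sleep(random.uniform(low, high))
In page_spider.py, get_page() and get_info() would then call requests.get(url, headers=random_headers()) and replace the fixed time.sleep(2) with polite_sleep().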