Week 2 Practical Assignment: Scraping 100,000 Product Listings

Practical Plan 0430 - practice assignment by 石头

Assignment requirements

Results

Implementation code

__author__ = 'daijielei'

'''
    Fetch the second-hand goods categories from Ganji.com, extract the link of each category and save them in a list for later use
'''
from bs4 import BeautifulSoup
import requests

hosturl = 'http://bj.ganji.com'

def getGanjiAlltype(url='http://bj.ganji.com/wu/#all_category'):
    webdata = requests.get(url)
    soup = BeautifulSoup(webdata.text,'lxml')

    typeLinks = []#collect the full URL of every category for later use
    for item in soup.select('div.content div.clearfix dl.fenlei a'):
        typeLinks.append(hosturl + str(item.get('href')))
        print(hosturl + str(item.get('href')))
    return typeLinks


#getGanjiAlltype()

links = '''
    http://bj.ganji.com/jiaju/
    http://bj.ganji.com/rirongbaihuo/
    http://bj.ganji.com/chuangdian/
    http://bj.ganji.com/guizi/
    http://bj.ganji.com/zhuoyi/
    http://bj.ganji.com/shafachaji/
    http://bj.ganji.com/zixingchemaimai/
    http://bj.ganji.com/diandongche/
    http://bj.ganji.com/motuoche/
    http://bj.ganji.com/shouji/
    http://bj.ganji.com/shoujihaoma/
    http://bj.ganji.com/iphone/
    http://bj.ganji.com/nokia/
    http://bj.ganji.com/htc/
    http://bj.ganji.com/sanxingshouji/
    http://bj.ganji.com/motorola/
    http://bj.ganji.com/shouji/_%E5%B0%8F%E7%B1%B3/
    http://bj.ganji.com/shouji/_%E9%AD%85%E6%97%8F/
    http://bj.ganji.com/tongxuntaocan/
    http://bj.ganji.com/qqhao/
    http://bj.ganji.com/bangong/
    http://bj.ganji.com/nongyongpin/
    http://bj.ganji.com/bangongjiaju/
    http://bj.ganji.com/jiguangyitiji/
    http://bj.ganji.com/dayinji/z1/
    http://bj.ganji.com/shipinjiagongshebei/
    http://bj.ganji.com/shengchanjiamengshebei/
    http://bj.ganji.com/jichuang/
    http://bj.ganji.com/tuolaji/
    http://bj.ganji.com/jiadian/
    http://bj.ganji.com/dianshi/
    http://bj.ganji.com/bingxiang/
    http://bj.ganji.com/kongtiao/
    http://bj.ganji.com/reshuiqi/
    http://bj.ganji.com/xiyiji/
    http://bj.ganji.com/diancilu/
    http://bj.ganji.com/weibolu/
    http://bj.ganji.com/yueqiyinxiang/
    http://bj.ganji.com/ershoubijibendiannao/
    http://bj.ganji.com/pingbandiannao/z1/
    http://bj.ganji.com/ershoubijibendiannao/z1/_%E8%8B%B9%E6%9E%9C/
    http://bj.ganji.com/ershoubijibendiannao/z1/_%E8%81%94%E6%83%B3/
    http://bj.ganji.com/ershoubijibendiannao/z1/_Thinkpad/
    http://bj.ganji.com/ershoubijibendiannao/z1/_%E7%B4%A2%E5%B0%BC/
    http://bj.ganji.com/ershoubijibendiannao/z1/_%E6%88%B4%E5%B0%94/
    http://bj.ganji.com/ershoubijibendiannao/z1/_%E5%8D%8E%E7%A1%95/
    http://bj.ganji.com/ershoubijibendiannao/z1/_%E6%83%A0%E6%99%AE/
    http://bj.ganji.com/ruanjiantushu/
    http://bj.ganji.com/yueqi/
    http://bj.ganji.com/yinxiang/
    http://bj.ganji.com/yundongqicai/
    http://bj.ganji.com/yingyouyunfu/
    http://bj.ganji.com/tongche/
    http://bj.ganji.com/tongzhuang/
    http://bj.ganji.com/yingerche/
    http://bj.ganji.com/yingerchuang/z1/
    http://bj.ganji.com/niaobushi/
    http://bj.ganji.com/wanju/
    http://bj.ganji.com/naifen/
    http://bj.ganji.com/diannao/
    http://bj.ganji.com/taishidiannaozhengji/
    http://bj.ganji.com/xianka/
    http://bj.ganji.com/cpu/
    http://bj.ganji.com/yingpan/
    http://bj.ganji.com/luyouqi/
    http://bj.ganji.com/3gwangka/
    http://bj.ganji.com/xianzhilipin/
    http://bj.ganji.com/shoucangpin/
    http://bj.ganji.com/qitalipinzhuanrang/
    http://bj.ganji.com/baojianpin/
    http://bj.ganji.com/xiaofeika/
    http://bj.ganji.com/fushixiaobaxuemao/
    http://bj.ganji.com/meironghuazhuang/
    http://bj.ganji.com/fushi/
    http://bj.ganji.com/xiangbao/
    http://bj.ganji.com/xuemao/
    http://bj.ganji.com/shoubiao/
    http://bj.ganji.com/shipin/
    http://bj.ganji.com/huazhuangpin/
    http://bj.ganji.com/hufupin/
    http://bj.ganji.com/shuma/
    http://bj.ganji.com/shumaxiangji/
    http://bj.ganji.com/shumashexiangji/
    http://bj.ganji.com/youxiji/
    http://bj.ganji.com/suishenting/
    http://bj.ganji.com/yidongcunchu/
    http://bj.ganji.com/laonianyongpin/
    http://bj.ganji.com/zibubaojian/z2/
    http://bj.ganji.com/anmobaojian/z1/
    http://bj.ganji.com/bawanwujian/
    http://bj.ganji.com/xuniwupin/
    http://bj.ganji.com/qitawupin/
    http://bj.ganji.com/ershoufree/
    http://bj.ganji.com/wupinjiaohuan/
    http://bj.ganji.com/zhuanqu_anjia/all/
    http://bj.ganji.com/zhuanqu_jiaren/all/
    http://bj.ganji.com/zhuanqu_shenghuo/all/
'''
__author__ = 'daijielei'

'''
    Crawler that scrapes item information from Ganji.com

'''
from bs4 import BeautifulSoup
import requests
import pymongo
import time
import random
import IP_Proxy
import datetime


__Test__ = ''   # 'test'   #switch used for testing: when set, nothing is written to or checked against the database
__Prosey__ = '' # 'use'  #switch that controls whether the IP proxies are used
'''
-----------------------------------------------------------------------------------------------------
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Database-related code below
More on MongoDB: http://www.runoob.com/mongodb/mongodb-indexing.html
                 https://blog.codecentric.de/files/2012/12/MongoDB-CheatSheet-v1_0.pdf
1. MongoDB can build indexes to speed up queries
2. Conditional operators support > >= < <= != queries
3. $type queries documents whose field holds a given BSON type
4. limit caps the number of results returned, skip jumps over a given number of results
5. Sorting, indexing, aggregation and more are available
(a small demo of these features appears at the end of this database section)
'''
client = pymongo.MongoClient('localhost',27017)
ganjiDB = client['ganjiDB']


'''
    ---------            gangjiGetlistChecksheet stores the list-page URLs that have already been crawled, so they are not crawled again               --------
    ---------            gangjiGetInfoChecksheet stores the item URLs that have already been crawled, so they are not crawled again               --------
'''
gangjiGetlistChecksheet = ganjiDB['gangjiGetlistChecksheet']#list pages already crawled; skip them next time
gangjiGetInfoChecksheet = ganjiDB['gangjiGetInfoChecksheet']#item pages already crawled; skip them next time
#save the URL of a crawled list page
def insertgangjiGetlistChecksheet(data):
    if(__Test__ == 'test'):return
    gangjiGetlistChecksheet.insert_one(data)
#save the URL of a crawled item page
def insertgangjiGetInfoChecksheet(data):
    if(__Test__ == 'test'):return
    gangjiGetInfoChecksheet.insert_one(data)
#check whether a list URL has already been crawled
def ifexistgangjiGetlistChecksheet(url):
    if(__Test__ == 'test'):return False
    ifexist = False
    if(gangjiGetlistChecksheet.find({'url':url}).count()>0):
        ifexist = True
    return ifexist
#check whether an item URL has already been crawled
def ifexistgangjiGetInfoChecksheet(url):
    if(__Test__ == 'test'):return False
    ifexist = False
    if(gangjiGetInfoChecksheet.find({'url':url}).count()>0):
        ifexist = True
    return ifexist
'''
    ---------            ganjiUrlSheet stores the item URLs that have been crawled, used for duplicate checks               --------
'''
ganjiUrlSheet = ganjiDB['ganjiUrlSheet']#stores the Ganji item URLs that have been collected
#insert into the item URL table
def insertganjiUrlSheet(data={}):
    if(__Test__ == 'test'):return
    ganjiUrlSheet.insert_one(data)
#print the item URL table
def showganjiUrlSheet():
    for item in ganjiUrlSheet.find():
        print(item)
#delete everything in the item URL table
def deletAllganjiUrlSheet():
    ganjiUrlSheet.delete_many({})
    print(ganjiUrlSheet.count())
#check whether a URL already exists in the item URL table
def ifexistganjiUrlSheet(url):
    if(__Test__ == 'test'):return False
    ifexist = False
    if ganjiUrlSheet.find({'url':url}).count() > 0:
        ifexist = True
    return ifexist



'''
    ---------            ganjiInfoSheet stores the item details that have been scraped, used for duplicate checks               --------
'''
ganjiInfoSheet = ganjiDB['ganjiInfoSheet']
#insert item details into the table
def insertganjiInfoSheet(data={}):
    if(__Test__ == 'test'):return
    ganjiInfoSheet.insert_one(data)
#print all item details
def showganjiInfoSheet():
    for item in ganjiInfoSheet.find():
        print(item)


'''
    ---------            operations on several tables at once               --------
'''
#show how much data each table currently holds
def showAllDBcount():
    print('Documents in the Ganji URL table: '+str(ganjiUrlSheet.count()))
    print('Documents in the Ganji item table: '+str(ganjiInfoSheet.count()))

#showAllDBcount()

#clear all data from every table
def clearAllganjisheet():
    ganjiUrlSheet.delete_many({})
    gangjiGetlistChecksheet.delete_many({})
    gangjiGetInfoChecksheet.delete_many({})
    ganjiInfoSheet.delete_many({})
    print(ganjiUrlSheet.count())
    print(gangjiGetlistChecksheet.count())
    print(gangjiGetInfoChecksheet.count())
    print(ganjiInfoSheet.count())
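
#A small demo of the MongoDB features listed in the notes at the start of this
#database section (index, conditional operators, $type, limit/skip, sort).
#It is a sketch only and is never called by the crawler; the field names
#'url', 'price' and 'catchDate' match the documents this crawler stores.
def mongoFeatureDemo():
    gangjiGetInfoChecksheet.create_index('url')#1. an index on the field used for the duplicate lookups
    print(ganjiInfoSheet.find({'price':{'$ne':None}}).count())#2. conditional operator: price != None
    print(ganjiInfoSheet.find({'price':{'$type':2}}).count())#3. $type 2 means the field holds a BSON string
    for item in ganjiInfoSheet.find().skip(10).limit(5).sort('catchDate',-1):#4./5. skip, limit and sort
        print(item)

#mongoFeatureDemo()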

'''
-----------------------------------------------------------------------------------------------------
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Crawler code below

gangji_getlist  scrapes item URLs (and related data) from a list page

gangji_getInfo  scrapes the details of a single item

'''

headers = {
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36',
    'Connection':'keep-alive',
}
#proxy servers can be picked from http://cn-proxy.com/
#pick a random proxy server to reduce the risk of being banned by the site
ip_proxy = ['1','2']#placeholder values; IP_Proxy.getProxy() is only used when the proxy switch is on
if __Prosey__ == 'use':
    ip_proxy = IP_Proxy.getProxy()
def getRandomProxy():
    proxies = {
        'http':random.choice(ip_proxy),
    }
    return  proxies

def delProxy(proxy):
    proxy = proxy['http']
    index = list(ip_proxy).index(proxy)
    del ip_proxy[index]
    print('Removed an invalid proxy, '+str(len(ip_proxy))+' left')


#connect() fetches the HTML of a page
def connect(url):
    try:
        proxies = getRandomProxy()
        if __Prosey__ == 'use':
            webdata = requests.get(url,headers=headers,proxies=proxies) #use a proxy to reduce the risk of being blocked
        else:
            webdata = requests.get(url,headers=headers)
        return webdata
    except (requests.exceptions.ConnectionError,TimeoutError):
        if __Prosey__ == 'use':#only remove the proxy if one was actually used
            delProxy(proxies)
        print('ConnectionError')
    return None

#connectTry() retries the connection, because these pages often fail to load on the first attempt
def connectTry(url):
    count = 0
    webdata = None
    while not webdata:  #keep retrying until data comes back
        time.sleep(1)
        webdata = connect(url)
        count = count + 1
        print('Attempt '+str(count))
        if(count > 100):    #give up after the maximum number of attempts
            return None
    return webdata



'''                              gangji_getlist  scrapes item URLs (and related data) from a list page                            '''
def gangji_getlist(url='http://bj.ganji.com/motuoche/',page=0):
    typeName = url.strip('/').split('/')[-1]
    finalUrl = url + 'o' + str(page)

    if(ifexistgangjiGetlistChecksheet(finalUrl)):#skip list pages that were already crawled
        print('This list page has already been crawled')
        return

    webdata = connectTry(finalUrl)
    #webdata = requests.get(finalUrl)
    if not webdata:#give up on this list page if it could not be fetched
        return
    soup = BeautifulSoup(webdata.text,'lxml')


    for item in soup.select('dl.list-bigpic.clearfix dt a'):#split the list page into item blocks for easier parsing
        if('x.htm' in item.get('href')):#only parse standard Ganji item URLs; skip others such as Zhuanzhuan listings
            data = {
                'url':item.get('href'),
                'type':typeName,
                'catchTime':"-",

            }
            print(data)
            gangji_getInfo(data['url'])   #the function checks for duplicates itself, so there is no need to check here
            #if(not ifexistganjiUrlSheet(item.get('href'))):#skip the item if it already exists
            #    gangji_getInfo(data['url'])
            #    insertganjiUrlSheet(data)
            #else:
            #    print('already exists')
        print('---------------------------------------------')

    insertgangjiGetlistChecksheet({'url':finalUrl})#record the crawled URL so the next run can skip content that was already crawled




'''                              gangji_getInfo  scrapes the details of a single item                            '''
def gangji_getInfo(url = 'http://bj.ganji.com/motuoche/1537729832x.htm'):
    if(ifexistgangjiGetInfoChecksheet(url)):#skip this item if it has already been crawled
        print('This item has already been crawled')
        return

    webdata = connectTry(url)
    #webdata = requests.get(url)
    if not webdata:#give up on this item if it could not be fetched
        return
    soup = BeautifulSoup(webdata.text,'lxml')

    data = {#the data to be saved and its structure
        'title':soup.select('h1.title-name')[0].get_text() if soup.select('h1.title-name') else "",
        'type':[a.get_text() for a in soup.select('.crumbs.routes.clearfix a')] if soup.select('.crumbs.routes.clearfix a') else "",
        'price':None,
        'place':None,
        'phone':None,
        'date':'no date',
        'saleday':None,
        'chense':'no data',#'chense' is the item's condition (成色)
        'catchDate':time.strftime("%m-%d", time.localtime()),
    }
    if soup.select('i.pr-5'):
        if(len(soup.select('i.pr-5')[0].get_text()) > 4):
            data['date'] = str(soup.select('i.pr-5')[0].get_text()).strip().split()[0]


    for item in soup.select('div.second-sum-cont ul.second-det-infor.clearfix li'):#get the item's condition
        label = item.label.get_text() if item.label else ""#read the label to tell what this block is
        if('新旧程度:' in label):#'新旧程度' is the condition label on the page
            #print(item.get_text())
            data['chense'] = item.get_text().strip().replace('新旧程度:\n','').replace(' ','')

    for item in soup.select('ul.det-infor li'):#walk the detail blocks and extract information according to each block's label
        label = item.label.get_text() if item.label else ""#read the label to tell what this block is
        if('价' in label):#the price label
            data['price'] = item.select('i.f22')[0].get_text() if item.select('i.f22') else ''
        #handle the trading-location data
        if('交易地点' in label):#the trading-location label
            if(item.select('a')):
                data['place'] = [a.get_text() if a else  "" for a in item.select('a')]

    print(data)
    insertganjiInfoSheet(data)
    insertgangjiGetInfoChecksheet({'url':url})








if __name__ == '__main__':
    print('main')
    #clearAllganjisheet()
    #showAllDBcount()
    gangji_getlist('http://bj.ganji.com/iphone/')
    #gangji_getInfo('http://bj.ganji.com/diannao/1991466771x.htm')
    #gangji_getInfo()
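
    #A sketch (assumption) of how the 100k-listing goal could be reached: loop over
    #the category links gathered by the first snippet and walk several list pages of
    #each category. The two sample categories below are placeholders; in practice
    #every URL in the `links` string would be used.
    #for link in ['http://bj.ganji.com/jiaju/','http://bj.ganji.com/shouji/']:
    #    for page in range(1,71):#list pages are addressed with the .../o<page> suffix
    #        gangji_getlist(link,page)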

