Pyspider的使用

from pyspider.libs.base_handler import *
import pymongo
class Handler(BaseHandler):
    """pyspider handler that crawls TripAdvisor attraction listings
    (Key West, Florida Keys) and stores the scraped records in MongoDB.
    """

    crawl_config = {
    }

    # Shared MongoDB connection for all tasks; results go to the 'trip' database.
    client = pymongo.MongoClient('localhost')
    db = client['trip']

    @every(minutes=24 * 60)
    def on_start(self):
        """Entry point, re-scheduled once per day by @every.

        validate_cert=False works around "HTTP 599: SSL certificate problem"
        on sites with self-signed certificates in the chain.
        """
        self.crawl('https://www.tripadvisor.cn/Attractions-g34345-Activities-c47-Key_West_Florida_Keys_Florida.html', callback=self.index_page, validate_cert=False)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Queue every attraction detail page on this listing page,
        then follow the pagination link to the next listing page.
        """
        for each in response.doc('.listing_title > a').items():
            self.crawl(each.attr.href, callback=self.detail_page, validate_cert=False)
        # Renamed from 'next' (shadowed the builtin). On the last page the
        # pagination anchor is absent and attr.href is None, so guard before
        # queuing — self.crawl(None, ...) would fail.
        next_url = response.doc('#FILTERED_LIST > div.al_border.deckTools.btm > div > div > a').attr.href
        if next_url:
            self.crawl(next_url, callback=self.index_page, validate_cert=False)

    @config(priority=2)
    def detail_page(self, response):
        """Extract name, address and phone from one attraction detail page.

        Returns a dict; pyspider passes it to on_result().
        """
        name = response.doc('.shelf_row_1 .name > a').text()
        address = response.doc('#taplc_attraction_detail_listing_0 > div.section.location').text()
        phone = response.doc('#taplc_attraction_detail_listing_0 > div.section.contact.last > div.ui_columns > div.ui_column.phone > div').text()

        return {
            "name": name,
            "url": response.url,
            "title": response.doc('title').text(),
            "address": address,
            "phone": phone,
        }

    def on_result(self, result):
        """pyspider result hook: persist every non-empty scraped record."""
        if result:
            self.save_to_mongo(result)

    def save_to_mongo(self, result):
        """Insert one scraped record into the 'london' collection.

        Collection.insert() was deprecated in pymongo 3.x and removed in 4.x;
        insert_one() is the supported replacement (its InsertOneResult is
        always truthy, so the success log still fires).
        """
        if self.db['london'].insert_one(result):
            print('saved to mongo', result)

pyspider用pyquery来获取元素
在抓取时如果报如下错误:

HTTP 599: SSL certificate problem: self signed certificate in certificate chain

只需要使用这个即可
self.crawl(url, callback=self.index_page, validate_cert=False)
该实例抓取 https://www.tripadvisor.cn 上的一些信息,并把抓取到的信息存储到 MongoDB 数据库中。

你可能感兴趣的:(Pyspider的使用)