Scrapy pipeline methods

The pipeline below implements the three standard hooks of a Scrapy item pipeline: open_spider() opens a MySQL connection when the spider starts, process_item() writes each book_info item into a book table (and just prints items from the dangdang spider), and close_spider() releases the connection when the spider finishes.

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# from pymongo import MongoClient

# client = MongoClient()
# collection = client['SpiderAnything']['hr']
import pymysql


class SpiderSuningBookPipeline(object):
    def process_item(self, item, spider):
        if spider.name == 'book_info':
            # collection.insert(dict(item))
            # Parameterized query: pymysql escapes and quotes the values
            # itself, which plain %-formatting into the SQL string does not.
            sql = """insert into book(title, author, download_text, new)
                     values(%s, %s, %s, %s)"""
            params = (
                item['title'],
                item['author'],
                item['download_text'],
                item['new'],
            )
            print(self.cursor.mogrify(sql, params))  # show the final SQL for debugging
            self.cursor.execute(sql, params)
        elif spider.name == 'dangdang':
            print(item)

        return item
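
    # The insert above assumes a matching `book` table already exists. The
    # post never shows the schema, so this DDL is only a guess (all column
    # types are assumptions):
    #
    #   create table book (
    #       title varchar(255),
    #       author varchar(255),
    #       download_text text,
    #       new varchar(255)
    #   );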

    def open_spider(self, spider):
        # Connect to the database
        self.connect = pymysql.connect(
            host='127.0.0.1',
            port=3306,
            db='study',
            user='root',
            passwd='123456',
            charset='utf8',
            use_unicode=True)

        # All reads and writes go through this cursor
        self.cursor = self.connect.cursor()
        # Commit after every statement so each item is persisted immediately
        self.connect.autocommit(True)

    def close_spider(self, spider):
        self.cursor.close()
        self.connect.close()
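
As the header comment says, the pipeline only runs once it is registered in ITEM_PIPELINES in settings.py. A minimal sketch, assuming the project package is named spider_suning_book (hypothetical; the real package name is not shown in the post):

# settings.py
ITEM_PIPELINES = {
    # the value is the run order: lower-numbered pipelines run first
    'spider_suning_book.pipelines.SpiderSuningBookPipeline': 300,
}

The commented-out lines at the top show the MongoDB path the author apparently used elsewhere. A minimal sketch of the same pipeline backed by MongoDB instead of MySQL, assuming a local mongod and pymongo installed (class name hypothetical; insert_one replaces the deprecated Collection.insert):

from pymongo import MongoClient


class MongoBookPipeline(object):
    def open_spider(self, spider):
        # MongoClient() connects to localhost:27017 by default
        self.client = MongoClient()
        self.collection = self.client['SpiderAnything']['hr']

    def process_item(self, item, spider):
        self.collection.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()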

 

Posted on 2019-04-11 17:33 by .Tang

Reposted from: https://www.cnblogs.com/tangpg/p/10691114.html
