A Scrapy utility for connecting to MySQL through a connection pool (Python 3)
Background: a simple spider I wrote to crawl movie torrents, with the data stored in MySQL.
Version: Python 3
IDE: PyCharm
Environment: Windows 10
Project: Scrapy spider
Note: if you use this code, please adapt it to your own setup. Thanks.
The utility file (db_util.py) is as follows:
""" 数据库连接工具类 # """ import pymysql import traceback from DBUtils.PooledDB import PooledDB from scrapy.utils.project import get_project_settings class MysqlUtil(object): # 获取setting文件中的配置 settings = get_project_settings() config = { 'host': settings.get('MYSQL_HOST'), 'port': settings.get('MYSQL_PORT'), 'database': settings.get('MYSQL_DATABASE'), 'user': settings.get('MYSQL_USER'), 'password': settings.get('MYSQL_PASSWORD'), 'charset': settings.get('MYSQL_CHARSET') } """ MYSQL数据库对象,负责产生数据库连接 , 此类中的连接采用连接池实现获取连接对象:conn = Mysql.getConn() 释放连接对象;conn.close()或del conn """ # 连接池对象 __pool = None def __init__(self): # 数据库构造函数,从连接池中取出连接,并生成操作游标 self._conn = MysqlUtil.get_conn() self._cursor = self._conn.cursor() # 获取链接 @staticmethod def get_conn(): """ @summary: 静态方法,从连接池中取出连接 @return MySQLdb.connection """ if MysqlUtil.__pool is None: __pool = PooledDB(creator=pymysql, mincached=1, maxcached=20, host=MysqlUtil.config['host'], port=MysqlUtil.config['port'], user=MysqlUtil.config['user'], passwd=MysqlUtil.config['password'], db=MysqlUtil.config['database'], charset=MysqlUtil.config['charset']) return __pool.connection() # 查询所有数据 def get_all(self, sql, param=None): """ @summary: 执行查询,并取出所有结果集 @param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来 @param param: 可选参数,条件列表值(元组/列表) @return: result list(字典对象)/boolean 查询到的结果集 """ try: if param is None: count = self._cursor.execute(sql) else: count = self._cursor.execute(sql, param) if count > 0: result = self._cursor.fetchall() else: result = False return result except Exception as e: traceback.print_exc(e) # 查询某一个数据 def get_one(self, sql, param=None): """ @summary: 执行查询,并取出第一条 @param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来 @param param: 可选参数,条件列表值(元组/列表) @return: result list/boolean 查询到的结果集 """ try: if param is None: count = self._cursor.execute(sql) else: count = self._cursor.execute(sql, param) if count > 0: result = self._cursor.fetchone() else: result = False return result except Exception as e: traceback.print_exc(e) # 查询数量 def get_count(self, sql, param=None): """ @summary: 执行查询,返回结果数 @param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来 @param param: 可选参数,条件列表值(元组/列表) @return: result list/boolean 查询到的结果集 """ try: if param is None: count = self._cursor.execute(sql) else: count = self._cursor.execute(sql, param) return count except Exception as e: traceback.print_exc(e) # 查询部分 def get_many(self, sql, num, param=None): """ @summary: 执行查询,并取出num条结果 @param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来 @param num:取得的结果条数 @param param: 可选参数,条件列表值(元组/列表) @return: result list/boolean 查询到的结果集 """ try: if param is None: count = self._cursor.execute(sql) else: count = self._cursor.execute(sql, param) if count > 0: result = self._cursor.fetchmany(num) else: result = False return result except Exception as e: traceback.print_exc(e) # 插入一条数据 def insert_one(self, sql, value): """ @summary: 向数据表插入一条记录 @param sql:要插入的SQL格式 @param value:要插入的记录数据tuple/list @return: insertId 受影响的行数 """ try: row_count = self._cursor.execute(sql, value) return row_count except Exception as e: traceback.print_exc(e) self.end("rollback") # 插入多条数据 def insert_many(self, sql, values): """ @summary: 向数据表插入多条记录 @param sql:要插入的SQL格式 @param values:要插入的记录数据tuple(tuple)/list[list] @return: count 受影响的行数 """ try: row_count = self._cursor.executemany(sql, values) return row_count except Exception as e: traceback.print_exc(e) self.end("rollback") # def __get_insert_id(self): # """ # 获取当前连接最后一次插入操作生成的id,如果没有则为0 # """ # self._cursor.execute("SELECT @@IDENTITY AS id") # result = 
self._cursor.fetchall() # return result[0]['id'] # 执行sql def __query(self, sql, param=None): try: if param is None: count = self._cursor.execute(sql) else: count = self._cursor.execute(sql, param) return count except Exception as e: traceback.print_exc(e) # 更新 def update(self, sql, param=None): """ @summary: 更新数据表记录 @param sql: SQL格式及条件,使用(%s,%s) @param param: 要更新的 值 tuple/list @return: count 受影响的行数 """ return self.__query(sql, param) # 删除 def delete(self, sql, param=None): """ @summary: 删除数据表记录 @param sql: SQL格式及条件,使用(%s,%s) @param param: 要删除的条件 值 tuple/list @return: count 受影响的行数 """ return self.__query(sql, param) def begin(self): """ @summary: 开启事务 """ self._conn.autocommit(0) def end(self, option='commit'): """ @summary: 结束事务 """ if option == 'commit': self._conn.commit() else: self._conn.rollback() def dispose(self, is_end=1): """ @summary: 释放连接池资源 """ if is_end == 1: self.end('commit') else: self.end('rollback') self._cursor.close() self._conn.close()
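A minimal usage sketch of the class on its own. The table and columns come from the pipeline shown further below; the torrent_type value is made up for illustration:

from torrentSpider.utils.db_util import MysqlUtil

db = MysqlUtil()
# parameterized query: values are passed separately, never interpolated into the SQL string
rows = db.get_all("select torrent_title, torrent_url from torrent_ye where torrent_type = %s", ("movie",))
if rows:
    for row in rows:
        print(row)
db.dispose()  # commit, close the cursor, and return the connection to the pool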
The MySQL and logger configuration is as follows (in settings.py; configure the rest yourself):
import datetime
# start MySQL database configure setting
MYSQL_HOST = "127.0.0.1"
MYSQL_PORT = 3306
MYSQL_DATABASE = "crawler_db"
MYSQL_USER = "admin"
MYSQL_PASSWORD = "admin"
MYSQL_CHARSET = "utf8"
# end of MySQL database configure setting

# start logger configure setting
current_day = datetime.datetime.now()
LOG_ENABLED = True  # enable logging (Scrapy's default is already True)
LOG_ENCODING = 'utf-8'
LOG_FILE = "C:/xxx_spider.{}-{}-{}.log".format(current_day.year, current_day.month, current_day.day)
LOG_LEVEL = "INFO"
LOG_STDOUT = True  # redirect stdout (e.g. print output) into the log file
# end logger configure setting
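For the pipeline below to actually run, it also has to be registered in settings.py. A minimal sketch, assuming the class lives in torrentSpider/pipelines.py (adjust the dotted path to your project layout):

# register the item pipeline; the number 300 is its priority (0-1000, lower runs first)
ITEM_PIPELINES = {
    'torrentSpider.pipelines.MySqlPipeline': 300,
}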
It is used as follows (in the item pipeline):
# -*- coding: utf-8 -*-
from torrentSpider.utils.db_util import MysqlUtil
import traceback
import logging


class MySqlPipeline(object):
    pool = None

    def __init__(self):
        pass

    # connect to the database when the spider opens
    def open_spider(self, spider):
        self.pool = MysqlUtil()

    # handle each scraped item
    def process_item(self, item, spider):
        try:
            # example query:
            # sql = "select * from torrent_ye"
            # count = self.pool.get_all(sql, None)
            # print('query count: ' + str(count))

            # check the database first; skip the insert if the record already exists
            sql_select = """select count(1) from torrent_ye where torrent_url = %(torrent_url)s"""
            params_select = {'torrent_url': item['torrent_url']}
            # bug fix: count(1) always yields exactly one result row, so get_count()
            # would always report 1; read the counted value itself instead
            row = self.pool.get_one(sql_select, params_select)
            if row and row[0] > 0:
                logging.info('record already exists: [%s][%s]', item['torrent_title'], item['torrent_url'])
                return item  # pipelines should return the item (or raise DropItem)
            sql_insert = """insert into torrent_ye(torrent_title, torrent_name, torrent_director,
                                torrent_actor, torrent_language, torrent_type, torrent_region,
                                torrent_update_time, torrent_status, torrent_show_time,
                                torrent_introduction, torrent_url)
                            values (%(torrent_title)s, %(torrent_name)s, %(torrent_director)s,
                                %(torrent_actor)s, %(torrent_language)s, %(torrent_type)s,
                                %(torrent_region)s, %(torrent_update_time)s, %(torrent_status)s,
                                %(torrent_show_time)s, %(torrent_introduction)s, %(torrent_url)s)"""
            params = {'torrent_title': item['torrent_title'], 'torrent_name': item['torrent_name'],
                      'torrent_director': item['torrent_director'], 'torrent_actor': item['torrent_actor'],
                      'torrent_language': item['torrent_language'], 'torrent_type': item['torrent_type'],
                      'torrent_region': item['torrent_region'], 'torrent_update_time': item['torrent_update_time'],
                      'torrent_status': item['torrent_status'], 'torrent_show_time': item['torrent_show_time'],
                      'torrent_introduction': item['torrent_introduction'], 'torrent_url': item['torrent_url']}
            self.pool.insert_one(sql_insert, params)
            self.pool.end("commit")
        except Exception as e:
            logging.error('exception occurred: [%s]', e)
            traceback.print_exc()  # bug fix: print_exc() takes no exception argument
            self.pool.end("rollback")
        return item

    # called when the spider closes
    def close_spider(self, spider):
        pass
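One possible refinement, shown as a sketch rather than as part of the original code: close_spider above is a no-op, so the cursor and pooled connection are only released when the process exits. Calling dispose() there returns them to the pool explicitly:

# sketch: release the pooled connection when the spider closes
def close_spider(self, spider):
    if self.pool is not None:
        self.pool.dispose()  # commit, close the cursor, and return the connection to the pool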
posted @ 2019-01-15 22:21 故意养只喵叫顺儿