Two ways to run scrapy on a schedule

On Windows the directory-listing command is dir; on Linux it is ls (handy for testing the subprocess calls used below).
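As a quick sanity check (a throwaway sketch, unrelated to the crawler itself), you can confirm that subprocess runs such a shell command before using it to launch scrapy later on:

import platform
import subprocess

# dir on Windows, ls elsewhere; shell=True because dir is a cmd.exe built-in.
cmd = "dir" if platform.system() == "Windows" else "ls"
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
print(result.stdout)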

When scheduled task execution comes up, many people think of Celery, or simply drop a script into crontab. For a small scheduled job, though, Celery is too heavy. So I went with a lightweight task-scheduling library: schedule. Installation is as simple as pip install schedule, and the API is easy to understand.
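A minimal, self-contained sketch of how the library is used (the job body and the one-minute interval are placeholders, not the crawler below):

import time
import schedule

def job():
    # Any zero-argument callable can be registered as a job.
    print("job is running")

schedule.every(1).minutes.do(job)

while True:
    # run_pending() fires whichever registered jobs are due.
    schedule.run_pending()
    time.sleep(1)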

# Imports used by the examples below:
from scrapy import cmdline
import schedule, time, os, subprocess, logging, datetime
from subprocess import run
from multiprocessing import Process

# schedule supports a range of intervals; job is any callable you register:
schedule.every(10).minutes.do(job)
schedule.every().hour.do(job)
schedule.every().day.at("10:30").do(job)
schedule.every(5).to(10).days.do(job)
schedule.every().monday.do(job)
schedule.every().wednesday.at("13:15").do(job)
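If the job needs arguments, schedule forwards any extra parameters of do() to the callable. A small sketch (the crawl function and spider name here are placeholders):

import schedule

def crawl(spider_name):
    print("would crawl", spider_name)

# Extra positional and keyword arguments to do() are passed through to the job.
schedule.every(30).minutes.do(crawl, "it")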

The first scheduling method, schedule plus multiprocessing: wrap the crawl in a function and register it with schedule.

# -*- coding: utf-8 -*-
import subprocess, schedule, time, datetime, logging
from multiprocessing import Process
from scrapy import cmdline

def crawl_work():
    print("I'm working...")
    # Alternative ways to launch the spider via subprocess:
    # cmd = "scrapy crawl NanPing"
    # subprocess.Popen(cmd)
    # subprocess.run(cmd, shell=True)
    # pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    # print(pipe.read())
    print('-' * 100)
    args = ["scrapy", "crawl", 'it']
    start = time.time()
    # Run cmdline.execute in a child process: the Twisted reactor cannot be
    # restarted within one process, so every crawl gets a fresh process.
    p = Process(target=cmdline.execute, args=(args,))
    p.start()
    p.join()
    logging.debug("### use time: %s" % (time.time() - start))

if __name__ == '__main__':
    print('*' * 10 + ' starting the scheduled crawler ' + '*' * 10)
    schedule.every(1).minutes.do(crawl_work)
    print('current time: {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
    print('*' * 10 + ' scheduled crawler is running ' + '*' * 10)
    while True:
        schedule.run_pending()
        time.sleep(10)
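The commented-out lines above hint at a subprocess-based variant. A minimal sketch of that approach, assuming the scrapy command is on PATH, the working directory is the Scrapy project root, and reusing the spider name 'it' from the example:

import subprocess

def crawl_work():
    # Launch the spider as a separate scrapy process and wait for it to finish.
    subprocess.run(["scrapy", "crawl", "it"], check=True)

Either way the crawl runs outside the scheduler's process, so a failed run cannot take the scheduling loop down with it.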

The second scheduling method:

from multiprocessing import Process
from scrapy import cmdline
import time, logging

# Just configure the parameters: spider name and run frequency (seconds to wait between runs).
confs = [
    {
        "spider_name": "it",
        "frequency": 2,
    },
]

def start_spider(spider_name, frequency):
    args = ["scrapy", "crawl", spider_name]
    while True:
        start = time.time()
        # Each crawl runs in its own child process; wait `frequency` seconds before the next one.
        p = Process(target=cmdline.execute, args=(args,))
        p.start()
        p.join()
        logging.debug("### use time: %s" % (time.time() - start))
        time.sleep(frequency)

if __name__ == '__main__':
    processes = []
    for conf in confs:
        process = Process(target=start_spider, args=(conf["spider_name"], conf["frequency"]))
        process.start()
        processes.append(process)
    # Keep the main process alive while the per-spider loops run.
    for process in processes:
        process.join()
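With this layout, adding another spider is just another dictionary in confs, and each entry gets its own long-running process. A small sketch, assuming a second spider named news exists in the project (the name and interval are placeholders):

confs = [
    {"spider_name": "it", "frequency": 2},      # crawl 'it' every 2 seconds
    {"spider_name": "news", "frequency": 3600}, # crawl 'news' once an hour
]

Compared with the schedule-based version, this method trades calendar-style rules such as every().day.at("10:30") for a fixed delay between runs, so pick whichever matches the cadence you need.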
