This article is written for readers who already have a basic familiarity with Scrapy; it collects a number of problems I ran into in an actual project.
Concrete spider demos will follow later at https://github.com/hanguangchao/scrapy_awesome
I have not been working with crawlers for long, so mistakes are inevitable; corrections and suggestions in the comments are welcome.
Contents
Notes on Scrapy problems
Sample code for those problems
Common Scrapy code snippets
Common Scrapy settings
Scrapy references
pip install scrapy
# Run the following in a terminal to create a new project
scrapy startproject myspider
# Run the first spider
# the spider "spider1" is defined in myspider/myspider/spiders/myspider.py
cd myspider
scrapy crawl spider1
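The commands above assume a spider named spider1 already exists. A minimal sketch of what it might look like, just to make the quickstart self-contained (the start URL and parse logic are placeholders; a fuller version of this spider appears later in the article):

# myspider/spiders/myspider.py  (hypothetical minimal spider1)
import scrapy

class Spider1(scrapy.Spider):
    name = "spider1"                     # the name used by `scrapy crawl spider1`
    start_urls = ["http://mysite.com/"]  # placeholder start URL

    def parse(self, response):
        # yield the page title just to confirm the spider runs
        yield {"title": response.xpath('//title/text()').extract()}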
You can use scrapy shell to analyse a page.
sel.xpath() returns a list of Selectors, which lets you check whether a given page structure exists.
scrapy shell http://news.163.com/
sel.xpath('//h1').extract()
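As an illustration, an empty selector list is falsy, so a simple truth test inside the shell tells you whether a structure is present (the class name "box" below is only an assumed example):

# inside scrapy shell; div.box is a hypothetical structure to check for
if sel.xpath('//div[@class="box"]'):
    print("the page contains div.box")
else:
    print("structure not found; the page layout may have changed")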
The sections below give concrete code examples for the problems listed above.
# myspider/items.py
import scrapy

class Item1(scrapy.Item):
    url = scrapy.Field()
    name = scrapy.Field()

class Item2(scrapy.Item):
    url = scrapy.Field()
    job_id = scrapy.Field()
    job_title = scrapy.Field()
# myspider/spiders/myspider.py
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from scrapy import Request

from myspider.items import Item1, Item2

class Spider1(CrawlSpider):
    # name of the spider
    name = "spider1"
    # domains the spider is allowed to crawl
    allowed_domains = ["mysite.com"]
    # URLs to start crawling from
    start_urls = [
        "http://mysite.com/",
        "http://mysite2.com/",
    ]

    # parse() is the entry point: it is called with the responses
    # of the URLs listed in start_urls
    def parse(self, response):
        sel = Selector(response)

        # Scraping several Items from one page:
        # while analysing the page, yield item as many times as needed
        item = Item1()
        item['url'] = response.url
        item['name'] = sel.xpath('').extract()  # XPath left blank in the original; fill in the expression for the name field
        yield item

        item2 = Item2()
        item2['url'] = response.url
        item2['job_id'] = sel.xpath('//h2[1]/a/@href').extract()
        item2['job_title'] = sel.xpath('//div[@class="box"]/a/text()').extract()
        yield item2

        # How do I make the spider follow a link into the next page?
        # Yield a Request() for the URL you want to follow
        # (url is a placeholder for the link extracted from the page)
        yield Request(url, callback=self.parse1)

        # Carrying custom data along with a request:
        # pass it through the meta parameter
        yield Request(url,
                      meta={'referer': url, 'job_id': item2['job_id']},
                      callback=self.parse2)

        # Re-crawling a page that was already crawled:
        # Scrapy filters duplicate URLs by default; pass dont_filter=True to request it again.
        # Be careful not to create an infinite loop (some_condition is a placeholder).
        if some_condition:
            yield Request(url, callback=self.parse, dont_filter=True)

    def parse1(self, response):
        sel = Selector(response)

    def parse2(self, response):
        sel = Selector(response)
        # the custom data is available through response.meta
        print(response.meta['job_id'])
# Example pipeline script
# myspider/pipelines.py
from myspider.items import Item1, Item2

class MyPipeline(object):
    def __init__(self):
        print("MyPipeline init")

    def open_spider(self, spider):
        print("MyPipeline opened")

    def close_spider(self, spider):
        print("MyPipeline closed")

    def process_item(self, item, spider):
        print('process_item')
        if isinstance(item, Item1):
            # storage logic for Item1
            print(item)
        if isinstance(item, Item2):
            # storage logic for Item2
            print(item)
        return item
# myspider/settings.py
# Route Items through MyPipeline (lower numbers run earlier, range 0-1000)
ITEM_PIPELINES = {
    'myspider.pipelines.MyPipeline': 300,
}
Using custom_settings
This attribute is a dict. When the spider starts, it overrides the project-level settings. Because the settings have to be applied before the spider is instantiated, custom_settings must be defined as a class attribute.
# myspider/spiders/spider3.py
from scrapy.spiders import CrawlSpider

class Spider3(CrawlSpider):
    name = "spider3"
    custom_settings = {
        "DOWNLOAD_DELAY": 5.0,
        "RETRY_ENABLED": False,
        "LOG_LEVEL": 'DEBUG',
        "DOWNLOADER_MIDDLEWARES": {
        },
        "ITEM_PIPELINES": {
        },
    }
# settings.py
# Ignore the robots.txt policy
ROBOTSTXT_OBEY = False
# Disable cookies
COOKIES_ENABLED = False
# Throttle the crawl speed
DOWNLOAD_DELAY = 5
# Disable redirects
REDIRECT_ENABLED = False
# Global concurrency
CONCURRENT_REQUESTS = 500
# Disable retries
RETRY_ENABLED = False
# Reduce the download timeout
DOWNLOAD_TIMEOUT = 15
# Pick a random proxy for each request
# PROXIES / FREE_PROXIES come from a project-specific proxy module
import random
import logging

from proxy import PROXIES, FREE_PROXIES

class CustomHttpProxyMiddleware(object):
    def process_request(self, request, spider):
        # use_proxy() decides whether this request should go through a proxy
        if self.use_proxy(request):
            p = random.choice(FREE_PROXIES)
            try:
                request.meta['proxy'] = "http://%s" % p['ip_port']
            except Exception as e:
                logging.critical("Exception %s" % e)
# Pick a random User-Agent for each request
# AGENTS is a list of User-Agent strings in a project-specific agents module
import random

from agents import AGENTS

class CustomUserAgentMiddleware(object):
    def process_request(self, request, spider):
        agent = random.choice(AGENTS)
        request.headers['User-Agent'] = agent
# Render JavaScript-heavy pages through Selenium before Scrapy parses them
from selenium import webdriver
from scrapy.http import HtmlResponse
import time

class CustomJavaScriptMiddleware(object):
    def process_request(self, request, spider):
        print("PhantomJS is starting...")
        driver = webdriver.PhantomJS()  # choose the (headless) browser to use
        driver.get(request.url)
        time.sleep(1)
        # execute JavaScript to simulate user actions; here, scroll to the bottom of the page
        js = "var q=document.documentElement.scrollTop=10000"
        driver.execute_script(js)
        time.sleep(1)
        body = driver.page_source
        print("visited " + request.url)
        return HtmlResponse(driver.current_url, body=body, encoding='utf-8', request=request)
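To take effect, these downloader middlewares must be registered in settings.py. A sketch, assuming the classes live in a myspider/middlewares.py module (both the module path and the priority numbers are assumptions):

# myspider/settings.py
DOWNLOADER_MIDDLEWARES = {
    'myspider.middlewares.CustomHttpProxyMiddleware': 543,
    'myspider.middlewares.CustomUserAgentMiddleware': 545,
    'myspider.middlewares.CustomJavaScriptMiddleware': 547,
}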
# myspider/pipelines.py
from scrapy.exceptions import DropItem

class DuplicatesPipeline(object):
    def __init__(self):
        self.ids_seen = set()

    def process_item(self, item, spider):
        if item['id'] in self.ids_seen:
            raise DropItem("Duplicate item found: %s" % item)
        else:
            self.ids_seen.add(item['id'])
            return item
# myspider/settings.py
ITEM_PIPELINES = {
    'myspider.pipelines.DuplicatesPipeline': 301,
}
# Store Items in MySQL asynchronously through Twisted's adbapi
from twisted.enterprise import adbapi
import datetime
import MySQLdb.cursors

class SQLStorePipeline(object):
    def __init__(self):
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
                                            host='127.0.0.1',
                                            db='webspider',
                                            user='mysql',
                                            passwd='secret',
                                            cursorclass=MySQLdb.cursors.DictCursor,
                                            charset='utf8',
                                            use_unicode=True)
        print("SQLStorePipeline init")

    def open_spider(self, spider):
        print("SQLStorePipeline opened")

    def close_spider(self, spider):
        print("SQLStorePipeline closed")

    def process_item(self, item, spider):
        # myItem stands for the project's Item class (e.g. Item1 above)
        if isinstance(item, myItem):
            query = self.dbpool.runInteraction(self._conditional_insert, item)
            query.addErrback(self._database_error, item)
        return item

    def _conditional_insert(self, tx, item):
        try:
            # sql is the INSERT statement built from the item's fields
            tx.execute(sql)
        except Exception as e:
            print(e)

    def _database_error(self, e, item):
        print("Database error: ", e)
# Export Items to a UTF-8 encoded JSON lines file
import json
import codecs
from collections import OrderedDict

class JsonWithEncodingPipeline(object):
    def __init__(self):
        self.file = codecs.open('data_utf8.json', 'w', encoding='utf-8')

    def open_spider(self, spider):
        print("JsonWithEncodingPipeline opened")

    def process_item(self, item, spider):
        line = json.dumps(OrderedDict(item), ensure_ascii=False, sort_keys=True) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        print("JsonWithEncodingPipeline closed")
        self.file.close()
# @todo save Items to Redis
class RedisStorePipeline(object):
    pass
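A minimal sketch of how the stub above might be filled in, assuming the redis-py client and a local Redis instance; the key name and JSON serialization are assumptions:

import json
import redis

class RedisStorePipeline(object):
    def open_spider(self, spider):
        # connection parameters are assumptions
        self.client = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)

    def process_item(self, item, spider):
        # push each item as a JSON string onto a per-spider list
        self.client.rpush("items:%s" % spider.name,
                          json.dumps(dict(item), ensure_ascii=False))
        return item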
# @todo save Items to MongoDB
class MongodbStorePipeline(object):
    pass
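Likewise, a minimal sketch for the MongoDB stub, assuming pymongo and a local MongoDB instance; the connection string, database and collection names are assumptions:

import pymongo

class MongodbStorePipeline(object):
    def open_spider(self, spider):
        # connection string and database/collection names are assumptions
        self.client = pymongo.MongoClient("mongodb://127.0.0.1:27017")
        self.collection = self.client["webspider"]["items"]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # store the item as a plain document
        self.collection.insert_one(dict(item))
        return item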