sucai.py
# -*- coding: utf-8 -*-
import scrapy
# Import the item (data model) class
from ..items import ImgItem
class SucaiSpider(scrapy.Spider):
name = 'sucai'
allowed_domains = ['sc.chinaz.com']
start_urls = ['http://sc.chinaz.com//']
def parse(self, response):
# 200: OK, the request succeeded
# 302: redirect
# 404: the URL does not exist
# 403: forbidden, the server refuses the request
# 5XX: server error
# Find the link to the icon section
tb_href = response.xpath('//div[@class="nav"]/ul/li[@class="nos"]/a[3]/@href').extract_first('')
# Build the full URL
tb_url = 'http://sc.chinaz.com'+tb_href
# Send a request
# 1. url: the address to request  2. callback: the callback function (parse() is used by default)
yield scrapy.Request(
url=tb_url,
callback=self.parse_list
)
# Parse the icon list pages
def parse_list(self,response):
# Use XPath to find the URLs of all icon detail pages
detail_links = response.xpath('//ul[@class="pngblock imgload"]/li/p/a/@href').extract()
# Loop over the list, take each detail page URL and send a request for it
for link in detail_links:
yield scrapy.Request(
url=link,
# On success, call parse_detail to parse the detail page
callback=self.parse_detail,
# meta carries extra data along with the request
meta={'hello':'world'}
)
# Find the link to the next page
next_href = response.xpath('//a[@class="nextpage"]/@href').extract_first('')
# Check whether there is a next page
if next_href:
# Build the URL of the next page
next_url = 'http://sc.chinaz.com/tubiao/'+next_href
yield scrapy.Request(
url=next_url,
callback=self.parse_list
)
# Parse the URL of every image on the detail page
def parse_detail(self,response):
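# The extra data attached via meta in parse_list is available here as
# response.meta.get('hello')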
# Parse the image category name
categray = response.xpath('//h2/a/text()').extract_first('')
# Parse the image URLs
imgs_src = response.xpath('//div[@class="png_pic"]/img/@src').extract()
# Loop over all image URLs
for src in imgs_src:
# Create an item (data model) object
img = ImgItem()
# src: the image download URL must be wrapped in a list
img['src'] = [src]
# Image category
img['categray'] = categray
yield img
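# Every yielded ImgItem is handed to the pipelines enabled in ITEM_PIPELINES
# (here MyImagePipeline in pipelines.py), which downloads the images it points to.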
items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SucaiSpiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
# Data model for the downloaded images
class ImgItem(scrapy.Item):
# Download URL(s) of the icon
src = scrapy.Field()
# Category of the icon
categray = scrapy.Field()
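# Note: settings.py points the images pipeline at this field with
# IMAGES_URLS_FIELD = 'src'; without that setting, ImagesPipeline expects
# the default field name image_urls.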
pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Import Scrapy's built-in image download pipeline
from scrapy.pipelines.images import ImagesPipeline
# Import scrapy
import scrapy
class SucaiSpiderPipeline(object):
def process_item(self, item, spider):
return item
# 1. Download the images  2. Choose the directory they are stored in
# Subclass of Scrapy's built-in image download pipeline
class MyImagePipeline(ImagesPipeline):
# Build the download requests for the images
def get_media_requests(self, item, info):
# 1. Take the image download URL out of the item
url = item['src'][0]
# 2. Create a request object for that URL
request = scrapy.Request(
url=url,
# Use meta to pass the item on to file_path
meta={"item":item}
)
# Return a list of request objects for the images to download
return [request]
# Override file_path to choose where each image is stored
def file_path(self, request, response=None, info=None):
# Get the item back out of meta; it holds the data for the image being downloaded
item = request.meta['item']
# The category
categray = item['categray']
# The image download URL
src = item['src'][0]
# Split the URL to get the file name
name = src.split('/')[-1]
# Return the storage path relative to IMAGES_STORE: <category>/<file name>
return '%s/%s'%(categray, name)
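# Optional extension (a sketch, not part of the original project; the class name
# CheckedImagePipeline is hypothetical): ImagesPipeline also exposes item_completed,
# which receives the download results, so a subclass can drop items whose image
# could not be fetched.
from scrapy.exceptions import DropItem

class CheckedImagePipeline(MyImagePipeline):
    def item_completed(self, results, item, info):
        # results is a list of (success, file_info_or_failure) tuples, one per request
        if not any(ok for ok, _ in results):
            raise DropItem('Image download failed for %s' % item['src'])
        return item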
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for SuCai_Spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'SuCai_Spider'
SPIDER_MODULES = ['SuCai_Spider.spiders']
NEWSPIDER_MODULE = 'SuCai_Spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'SuCai_Spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'SuCai_Spider.middlewares.SucaiSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'SuCai_Spider.middlewares.SucaiSpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'SuCai_Spider.pipelines.MyImagePipeline': 300,
# 'scrapy.pipelines.images.ImagesPipeline': 200
}
# Which item field holds the image download URLs
IMAGES_URLS_FIELD = 'src'
# Directory where downloaded images are stored
IMAGES_STORE = 'imgs'
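# Note: the images pipeline requires the Pillow library to be installed.
# Optional image settings (not used in this project, shown here only for reference):
# skip re-downloading images fetched within the last 90 days and ignore very small images.
#IMAGES_EXPIRES = 90
#IMAGES_MIN_HEIGHT = 100
#IMAGES_MIN_WIDTH = 100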
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
debug.py
# coding: utf-8
# Import the execute function from cmdline
from scrapy.cmdline import execute
# Run the command that starts the spider
execute(['scrapy', 'crawl', 'sucai'])
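# Equivalent alternative (a sketch, not part of the original project): start the
# crawl in-process with CrawlerProcess instead of going through cmdline.execute:
#
# from scrapy.crawler import CrawlerProcess
# from scrapy.utils.project import get_project_settings
#
# process = CrawlerProcess(get_project_settings())
# process.crawl('sucai')
# process.start()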
middlewares.py
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SucaiSpiderSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SucaiSpiderDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
scrapy startproject name
scrapy genspider name domain
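For this project, for example, these two commands would be:
scrapy startproject SuCai_Spider
scrapy genspider sucai sc.chinaz.com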