Using Scrapy

Steps for using the Scrapy framework

Create a Scrapy project: scrapy startproject Myfirst_spiders

Create a spider file:
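One common way to generate it (assuming the jobbole spider and domain used later in this article) is the genspider command, run from inside the project directory:

cd Myfirst_spiders
scrapy genspider jobbole blog.jobbole.com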

Once it is created, we can write the crawler code inside the generated spider file.
How do you run Scrapy? 1. Create a main.py file

Put the following code in main.py, then simply run the main file:

import os
import sys
from scrapy.cmdline import execute
dir_file = os.path.dirname(os.path.abspath(__file__))  # absolute path of the directory containing this file, i.e. the project root (myfirst_spider)
sys.path.append(dir_file)  # add the project path to sys.path
execute(['scrapy', 'crawl', 'jobbole', '--nolog'])  # the first two arguments are fixed; 'jobbole' is the name of the spider you created; --nolog suppresses log output

1. Write the code that parses the page content in the spider file's parse method; pick whichever parsing method you prefer:

CSS selectors, XPath, or regular expressions (re)

2. Define the data structure you need in items.py, for example (see the sketch below):
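As a minimal sketch, using the fields that the jobbole spider below actually stores:

import scrapy

class JobboleItem(scrapy.Item):
    title = scrapy.Field()       # article title
    creat_date = scrapy.Field()  # article date
    like = scrapy.Field()        # number of likes
    img = scrapy.Field()         # image URL(s), stored as a list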

3. These are used when writing the code in the parse method of the spider project you created; CSS selectors are used here.

CSS selectors: to select an attribute value of a tag, for example the href attribute of an a tag: response.css('a::attr(href)').extract()

It is quicker to test expressions in the scrapy shell, so you can try your extraction there first and then copy it back into your editor.
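For example, a quick interactive session against the listing page used below might look like this:

scrapy shell http://blog.jobbole.com/all-posts/
>>> response.css('a::attr(href)').extract()                         # all href values on the page
>>> response.css('.next.page-numbers::attr(href)').extract_first()  # the next-page link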

Parsing multiple pages with Scrapy:

Method 1:

Method 2:
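One common way to crawl several pages, and the one the jobbole spider below uses, is to extract the next-page link and yield a new Request (from scrapy.http) whose callback is parse itself; a minimal sketch:

    def parse(self, response):
        # ... parse the items on the current page ...
        next_url = response.css('.next.page-numbers::attr(href)').extract_first('')
        if next_url:
            # hand the next page back to this same method
            yield Request(url=next_url, callback=self.parse)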

Passing parameters: besides the URL, you may also want to pass along the image link so it can be downloaded:
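This is done through the Request's meta dictionary; a sketch of how the jobbole spider below does it:

        # in parse(): attach the image URL to the request
        yield Request(url=url, meta={'post_img': img}, callback=self.detail_parse)

        # in detail_parse(): read it back off the response
        img = response.meta.get('post_img')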
Saving images with Scrapy:

The relevant code is in site-packages -> scrapy -> pipelines -> images.py


Because the function defined in that pipeline looks like the snippet below, the image URLs must be passed as a list.
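In Scrapy 1.x the relevant method is roughly the following (abridged): get_media_requests builds one Request per entry of the configured item field, iterating over it as a list, which is why even a single URL has to be wrapped in a list:

    def get_media_requests(self, item, info):
        return [Request(x) for x in item.get(self.images_urls_field, [])]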

Make the following configuration in settings:
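The key lines, as they appear in the full settings.py below, are:

import os

ITEM_PIPELINES = {
    'scrapy.pipelines.images.ImagesPipeline': 200,  # Scrapy's built-in image pipeline
}
IMAGES_URLS_FIELD = 'img'  # item field that holds the image URL list
file_path = os.path.dirname(os.path.abspath(__file__))  # directory of settings.py
IMAGES_STORE = os.path.join(file_path, 'imges')  # directory where downloaded images go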
Implementation code
The spider code under spiders, jobbole.py:
# -*- coding: utf-8 -*-
#jobbole.py
import scrapy
from Myfirst_Spider.items import JobboleItem
from scrapy.http import Request


class JobboleSpider(scrapy.Spider):
    name = 'jobbole'  # name of the spider
    # allowed_domains = ['blog.jobbole.com']   # domains the spider is allowed to crawl
    start_urls = ['http://blog.jobbole.com/all-posts/']  # starting URL

    # parse the listing page and hand the article URLs to Scrapy for downloading
    def parse(self, response):
        # get the links of every article and image on one page (20 posts)
        post_urls = response.css('#archive .floated-thumb .post-thumb a')
        for post_url in post_urls:
            # article link
            url = post_url.css('::attr(href)').extract_first('')
            # image link
            img = post_url.css('img::attr(src)').extract_first('')
            # hand the URL to Scrapy for downloading; meta carries the image URL,
            # callback passes the downloaded page to detail_parse
            yield Request(url=url, meta={'post_img': img}, callback=self.detail_parse)
        # get the next-page link
        next_url = response.css('.next.page-numbers::attr(href)').extract_first('')
        # if the link exists, callback passes the page back to parse (the method calls itself)
        if next_url:
            yield Request(url=next_url, callback=self.parse)

    def detail_parse(self, response):
        # instantiate the item class
        item = JobboleItem()
        # article title
        title = response.css('.entry-header h1::text').extract()[0]
        # article date
        creat_date = response.css('.entry-meta-hide-on-mobile::text').extract()[0].replace('·', '').strip()
        # number of likes
        like = response.css('.btn-bluet-bigger.href-style.vote-post-up.register-user-only h10::text').extract()
        # image link passed through meta
        img = response.meta.get('post_img')
        item['title'] = title            # save title into the item
        item['creat_date'] = creat_date  # save creat_date into the item
        item['like'] = like              # save like into the item
        item['img'] = [img]              # image URLs must be passed as a list
        yield item  # hand the finished item to the pipelines
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy

class MyfirstSpiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass

class JobboleItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    creat_date = scrapy.Field()
    like = scrapy.Field()
    img = scrapy.Field()

settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for Myfirst_Spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Myfirst_Spider'

SPIDER_MODULES = ['Myfirst_Spider.spiders']
NEWSPIDER_MODULE = 'Myfirst_Spider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Myfirst_Spider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Myfirst_Spider.middlewares.MyfirstSpiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Myfirst_Spider.middlewares.MyfirstSpiderDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Output item pipelines; the smaller the number, the earlier the pipeline runs
ITEM_PIPELINES = {
   # 'Myfirst_Spider.pipelines.MyfirstSpiderPipeline': 300,
    # use Scrapy's built-in image-downloading pipeline
    'scrapy.pipelines.images.ImagesPipeline': 200,  # the ImagesPipeline from scrapy/pipelines/images.py
   # 'Myfirst_Spider.pipelines.jobbolePipeline': 250,
}

import os
# field that holds the image URLs to download
IMAGES_URLS_FIELD = 'img'  # the URLs are stored under the name 'img' in items.py, so use 'img'
# locate the directory that settings.py lives in
file_path = os.path.dirname(os.path.abspath(__file__))  # the parent directory of this file, at the same level as spiders
# IMAGES_STORE = (file_path + '/imges')  # path for saved images; the line below does the same without the + operator
IMAGES_STORE = os.path.join(file_path, 'imges')  # path where downloaded images are saved




# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
main.py
import os
import sys
from scrapy.cmdline import execute
# print(os.path.abspath(__file__))  # absolute path of this file
dir_file = os.path.dirname(os.path.abspath(__file__))  # absolute path of the project directory (myfirst_spider)
sys.path.append(dir_file)  # add the project path to sys.path
execute(['scrapy', 'crawl', 'jobbole', '--nolog'])  # the first two arguments are fixed; 'jobbole' is the name of the spider; --nolog suppresses log output
Steps to save the output as JSON text:

1. Define a class in pipelines.py

2. Configure it in settings.py

In the spider file:

In the items file:
Here we crawl the text section of Qiushibaike. Note that you must add a User-Agent in settings, otherwise the pages may not parse correctly and you may get nothing back.
The spider file demo.py under spiders:
# -*- coding: utf-8 -*-
import scrapy
from Jockbaike.items import BaikeItem


class DemoSpider(scrapy.Spider):
    name = 'demo'
    # allowed_domains = ['baidu.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        item = BaikeItem()
        item['author'] = response.css('.author.clearfix h2::text').extract()
        item['content'] = response.css('.content span::text').extract()
        item['vote'] = response.css('.stats-vote .number::text').extract()
        item['comment']  = response.css('.stats-comments .number::text').extract()
        print('Author: {}\nContent: {}\n{} people found it funny\n{} comments'.format(item['author'], item['content'], item['vote'], item['comment']))
        yield item
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class JockbaikeItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass

class BaikeItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    author = scrapy.Field()
    content = scrapy.Field()
    vote = scrapy.Field()
    comment = scrapy.Field()
pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs

class JockbaikePipeline(object):
    def process_item(self, item, spider):
        return item

class qiubaiPipeline(object):
    def __init__(self):
        # open the output file with UTF-8 encoding
        self.file = codecs.open('artile.json', 'w', 'utf-8')

    def process_item(self, item, spider):
        # serialize the item as one JSON line and write it out
        lines = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.file.write(lines)
        return item

    def close_spider(self, spider):
        # close the file when the spider finishes
        self.file.close()
settings.py: just enable ITEM_PIPELINES; everything else stays the same
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (+http://www.yourdomain.com)'

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'Jockbaike.pipelines.JockbaikePipeline': 300,
    'Jockbaike.pipelines.qiubaiPipeline': 200,  # the class defined in pipelines.py; the lower number runs first
}
main.py
import os
import sys
from scrapy.cmdline import execute
dif_file = (os.path.dirname(os.path.abspath(__file__)))
sys.path.append(dif_file)
execute(['scrapy','crawl','demo','--nolog'])

That is all it takes to save the crawled content as a JSON file.
