I. Basic Commands
Create a project: scrapy startproject <project_name>
Create a spider file: scrapy genspider <spider_file_name> <domain>
Run a spider: scrapy crawl <the spider's name attribute>
List the runnable spiders: scrapy list
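These commands can also be driven from a plain Python script instead of the CLI. A minimal sketch, assuming it is run from inside the project directory (run.py is a made-up file name; 'qsw_test' is the spider created in the next section):
# run.py -- start a crawl without the scrapy CLI
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # loads the project's settings.py
process.crawl('qsw_test')  # pass the spider's name attribute
process.start()  # blocks until the crawl finishes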
II. A First Try (scrapy.Spider)
Target site: 全书网 (www.quanshuwang.com)
1. Create the project
scrapy startproject qsw_spider
2. Create the spider file
cd qsw_spider
scrapy genspider qsw_test www.quanshuwang.com
3. Define the item fields in items.py
import scrapy


class QswSpiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    author = scrapy.Field()
    context = scrapy.Field()
    link_url = scrapy.Field()
4. Write the spider in qsw_test.py
# -*- coding: utf-8 -*-
import scrapy
from ..items import QswSpiderItem


class QswTestSpider(scrapy.Spider):
    name = 'qsw_test'
    # allowed_domains = ['www.quanshuwang.com']
    start_urls = ['http://www.quanshuwang.com/list/1_1.html']

    def parse(self, response):
        '''
        Parse the book list page.
        :param response:
        :return:
        '''
        link_url_list = response.xpath(r'//ul[@class="seeWell cf"]/li/a/@href').extract()
        title_list = response.xpath(r'//ul[@class="seeWell cf"]/li/span/a[@target="_blank"]/text()').extract()
        author_list = response.xpath(r'//ul[@class="seeWell cf"]/li/span/a[2]/text()').extract()
        context_list = response.xpath(r'//ul[@class="seeWell cf"]/li/span/em/text()').extract()
        for title, author, context, link_url in zip(title_list, author_list, context_list, link_url_list):
            items = QswSpiderItem()
            items['title'] = title
            items['author'] = author
            items['context'] = context
            items['link_url'] = link_url
            yield items
        # follow the "next page" link, if there is one
        next_url = response.xpath('//div[@class="pages"]/div[@class="pagelink"]/a[@class="next"]/@href').extract()
        if next_url:
            yield scrapy.Request(next_url[0], callback=self.parse)
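As a side note, Scrapy 1.4+ also provides response.follow, which builds the Request and resolves relative URLs, so the pagination block at the end of parse could roughly be written as (a sketch, same XPath as above):
        next_url = response.xpath('//div[@class="pages"]/div[@class="pagelink"]/a[@class="next"]/@href').extract_first()
        if next_url:
            # response.follow resolves the link relative to response.url
            yield response.follow(next_url, callback=self.parse)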
5. Set up the item pipeline in pipelines.py
class QswSpidertestPipeline(object):
    def __init__(self):
        self.fa = open('result.json', 'w+', encoding='utf-8')

    def process_item(self, item, spider):
        self.fa.write(str(item) + '\r\n')
        return item

    def close_spider(self, spider):
        self.fa.close()
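Note that str(item) writes Python repr lines rather than valid JSON. If real JSON lines are wanted, a pipeline along these lines could be used instead (a sketch; the class name and output file name are made up, and it would still need to be registered in ITEM_PIPELINES):
import json

class QswSpiderJsonLinesPipeline(object):
    def open_spider(self, spider):
        # one JSON object per line ("JSON lines" format)
        self.fa = open('result.jl', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        self.fa.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.fa.close()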
6. Enable the pipeline in settings.py
ITEM_PIPELINES = {
    'qsw_spider.pipelines.QswSpidertestPipeline': 300,
}
7. Run the spider
scrapy crawl qsw_test
III. Another Spider (CrawlSpider)
A User-Agent has to be set first; open settings.py:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0',
}
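If only the UA needs changing, Scrapy's USER_AGENT setting (also in settings.py) covers that with a single line; roughly:
# same UA as above, via the dedicated setting
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0'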
1. Create the spider file
cd /d D:\Python_scrapy_test\spider_qsw\qsw_spider
# create a CrawlSpider-based spider file
scrapy genspider -t crawl js jianshu.com
2. Define the required fields in items.py
import scrapy


class JianshuItem(scrapy.Item):
    title = scrapy.Field()
    url = scrapy.Field()
    author = scrapy.Field()
    pub_time = scrapy.Field()
    read_count = scrapy.Field()
    word_count = scrapy.Field()
3. Write the spider in js.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class JsSpider(CrawlSpider):
    name = 'js'
    # allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']

    rules = (
        # the part after /p/ is the article uid; matching it is enough
        Rule(LinkExtractor(allow=r'https://www.jianshu.com/p/.*?'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = {}
        # item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
        # item['name'] = response.xpath('//div[@id="name"]').get()
        # item['description'] = response.xpath('//div[@id="description"]').get()
        item['title'] = response.xpath(r'//h1[@class="_1RuRku"]/text()').extract_first()
        item['author'] = response.xpath(r'//span[@class="_22gUMi"]/text()').extract_first()
        item['pub_time'] = response.xpath(r'//time/text()').extract_first()
        item['url'] = response.url
        # the span order differs between pages, so check whether the second
        # span holds the word count ("字数") or the read count ("阅读")
        count = response.xpath(r'//div[@class="s-dsoj"]/span[2]/text()').extract_first()
        if '字数' in count:
            item['word_count'] = count.split(' ')[-1]
            item['read_count'] = response.xpath(r'//div[@class="s-dsoj"]/span[3]/text()').extract_first().split(' ')[-1]
        elif '阅读' in count:
            item['read_count'] = count.split(' ')[-1]
            item['word_count'] = response.xpath(r'//div[@class="s-dsoj"]/span[1]/text()').extract_first().split(' ')[-1]
        yield item
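Class names such as _1RuRku and _22gUMi look auto-generated by the site's front-end build and may change over time, so it helps to verify the XPath expressions interactively with scrapy shell before running the full crawl (the article URL below is a placeholder):
scrapy shell "https://www.jianshu.com/p/<article_uid>"
>>> response.xpath('//h1[@class="_1RuRku"]/text()').extract_first()
>>> response.xpath('//div[@class="s-dsoj"]/span[2]/text()').extract_first()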
4. Set up the item pipeline in pipelines.py
class JianShuPipeline(object):
    def __init__(self):
        self.f = open('result_jianshu.json', 'w+', encoding='utf-8')

    def process_item(self, item, spider):
        self.f.write(str(item) + '\r\n')
        return item

    def close_spider(self, spider):
        self.f.close()
5. Enable the pipeline in settings.py
ITEM_PIPELINES = {
    'qsw_spider.pipelines.JianShuPipeline': 301,
}
6. Run the spider
scrapy crawl js
7. Result screenshot