A simple Scrapy program for crawling 博客园 (cnblogs) news

Contents

  • 1: Project directory structure
  • 2: jobbole.py in spiders
  • 3: common.py
  • 4: items.py
  • 5: main.py
  • 6: pipelines.py
  • Finally, the settings.py file

1: Project directory structure

(Screenshot: project directory structure)

2: jobbole.py in spiders

# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request

from urllib import parse
import requests 
import re
import json
from spiderBaby.items import JobBoleArticleItem
from spiderBaby.utils import common


# Proxy settings (defined here but not actually used anywhere in this spider)
proxy = 'proxy-cn.toshiba.co.jp:8080'
proxies = {
    'http': 'http://' + proxy,
    'https': 'https://' + proxy,
}

class JobboleSpider(scrapy.Spider):

    name = 'jobbole'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['http://news.cnblogs.com/',]

    def parse(self, response):

        # 1: Extract the news URLs from the list page, hand them to Scrapy to download,
        #    and call the corresponding parse callback on each response.
        # 2: Extract the next-page URL, hand it to Scrapy to download, then let parse()
        #    keep following the pagination.
        # Selectors for all news entries on the page

        post_nodes = response.css('#news_list .news_block')[:1]  # Only the first item on page one; normally you would take every item.

        for post_node in post_nodes:
            # Iterate to get each entry's cover-image URL and news URL

            image_url = post_node.css(".entry_summary a img::attr(src)").extract_first("")  # cover-image URL of the entry
            if image_url.startswith("//"):
                image_url = "https:" + image_url
            post_url = post_node.css("h2 a::attr(href)").extract_first("")  # URL of the news detail page

            """
            路径拼接,urljoin(1,2) 2如果不是一个完整的路径就把1和2拼接成一个完整的路径.
            如果是一个完整的就不拼接,直接用2作用路径
            """
            # 这里做了一个分发异步, 用另一个函数处理下一步的请求,也就是新闻的详情页.
            # 回调到另一个函数,meta参数是为了把图片的url传到回调函数里.
            yield Request(url="{}{}".format("http://news.cnblogs.com", post_url), 
                    meta={"front_image_url":image_url}, callback=self.parse_detail) 
        
        # Extract the next-page URL (commented out here; uncomment both lines to follow pagination)

        # next_url = response.xpath("//a[contains(text(), 'Next >')]/@href").extract_first()
        # yield Request(url="{}{}".format("https://news.cnblogs.com", next_url), callback=self.parse)

    def parse_detail(self, response):
        """
        Three things happen here:
        1: Extract the fields I need from the detail page (not all of them are strictly necessary).
        2: Put the values into the item; the keys match the fields defined in items.py.
        3: Make one more request to a dynamically loaded URL (the AJAX stats endpoint)
           and pass the item on to the next parse callback via meta.
        """

        match_re = re.match(r".*?(\d+)", response.url)  # pull the numeric post id out of the URL
        # html = requests.get(parse.urljoin)
        if match_re:
            post_id = match_re.group(1)

            article_item = JobBoleArticleItem()
            news_title = response.css("#news_title a::text").extract_first("")

            create_time = response.css("#news_info .time::text").extract_first("")
            match_res = re.match(r".*?(\d+.*)", create_time)
            if match_res:
                create_time = match_res.group(1)

            news_content = response.css("#news_content #news_body").extract()
            tag_list = response.css(".news_tags a::text").extract()
            tag_str = ",".join(tag_list)


            # Copy the extracted values into the item
            article_item["news_title"] = news_title
            article_item["create_time"] = create_time
            article_item["news_content"] = news_content
            article_item["tag_str"] = tag_str
            if response.meta.get("front_image_url", ""):
                article_item["front_image_url"] = [response.meta.get("front_image_url", "")]
            else:
                article_item["front_image_url"] = []
            article_item["url"] = response.url

            # html = requests.get("https://news.cnblogs.com/NewsAjax/GetAjaxNewsInfo?contentId={}".format(post_id))
            # j_data = json.loads(html.text)
            yield Request(url="https://news.cnblogs.com/NewsAjax/GetAjaxNewsInfo?contentId={}".format(post_id), dont_filter=True, meta = {"article_item":article_item}, callback=self.parse_nums)

            # praise_nums = j_data["DiggCount"]
            # fav_nums = j_data["TotalView"]
            # comment_nums = j_data["CommentCount"]

    
    def parse_nums(self, response):
        """
            1:取出item对象
            2:把动态加载的数据存到item对象中,并把不固定长度的url用md5解析成固定长度
            3:返回item数据到items.py
        """

        j_data = json.loads(response.text)
        article_item = response.meta.get("article_item", "")

        praise_nums = j_data["DiggCount"]
        fav_nums = j_data["TotalView"]
        comment_nums = j_data["CommentCount"]

        article_item["praise_nums"] = praise_nums
        article_item["fav_nums"] = fav_nums
        article_item["comment_nums"] = comment_nums

        article_item["url_object_id"] = common.get_md5(article_item["url"])

        yield article_item
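The docstring inside parse() describes urljoin, but the actual request URL is built with plain string formatting. The snippet below is a minimal, runnable illustration of what parse.urljoin does (the relative path is made up for the example); inside parse() the equivalent call would be parse.urljoin(response.url, post_url).

from urllib import parse

base = "https://news.cnblogs.com/"
print(parse.urljoin(base, "/n/123456/"))              # relative path   -> https://news.cnblogs.com/n/123456/
print(parse.urljoin(base, "https://example.com/x"))   # already absolute -> https://example.com/x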

3: common.py (in the utils package)

import hashlib

def get_md5(url):
    """Return the 32-character hex MD5 digest of a URL, used as a fixed-length id."""
    if isinstance(url, str):
        url = url.encode("utf-8")
    m = hashlib.md5()
    m.update(url)

    return m.hexdigest()


if __name__ == "__main__":
    print(get_md5("https"))
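A quick check of the property the spider relies on: hexdigest() always returns a 32-character hex string no matter how long the URL is, which is why it is used as the fixed-length url_object_id.

print(len(get_md5("http://news.cnblogs.com/")))                      # 32
print(len(get_md5("http://news.cnblogs.com/n/a/much/longer/url/")))  # 32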

4: items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class SpiderbabyItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass


class JobBoleArticleItem(scrapy.Item):
    news_title = scrapy.Field()
    create_time = scrapy.Field()
    url = scrapy.Field()
    url_object_id = scrapy.Field()
    front_image_url = scrapy.Field()
    front_image_path = scrapy.Field()

    praise_nums = scrapy.Field()
    fav_nums = scrapy.Field()
    comment_nums = scrapy.Field()
    tag_str = scrapy.Field()
    news_content = scrapy.Field()
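A scrapy.Item works like a dict restricted to the declared fields; that is how the spider fills it and how JsonWithEncodingPipeline later serializes it with dict(item). A small sketch with made-up values:

from spiderBaby.items import JobBoleArticleItem

item = JobBoleArticleItem()
item["news_title"] = "some title"
item["praise_nums"] = 3
print(dict(item))            # {'news_title': 'some title', 'praise_nums': 3}
# item["no_such_field"] = 1  # would raise KeyError: only declared fields are allowed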

5: main.py

from scrapy.cmdline import execute
import sys
import os

sys.path.append(os.path.dirname(os.path.abspath(__file__)))

execute(["scrapy", "crawl", "jobbole"])

6: pipelines.py

# -*- coding: utf-8 -*-
from scrapy.pipelines.images import ImagesPipeline
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
import MySQLdb


class SpiderbabyPipeline:
    def process_item(self, item, spider):
        return item


class MysqlPipeline(object):
    def __init__(self):
        self.connect = MySQLdb.connect("127.0.0.1", 'root', 'rootJUST666', 'spider',
                                       charset='utf8', use_unicode=True)
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        # Only the news title is stored here, into the single `content` column
        insert_sql = """
            insert into spiders(content) values(%s)
        """

        self.cursor.execute(insert_sql, (item.get("news_title", ""),))

        self.connect.commit()

        return item

class JsonWithEncodingPipeline(object):
    # Export items to a local JSON-lines file
    def __init__(self):
        self.file = codecs.open("article.txt", "a", encoding="utf-8")

    def process_item(self, item, spider):
        lines = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(lines)
        return item

    def close_spider(self, spider):  # called by Scrapy when the spider finishes
        self.file.close()

class ArticleImagePipeline(ImagesPipeline):
    def item_completed(self, results, item, info):
        # results is a list of (ok, info_or_error) tuples, one per downloaded image
        if "front_image_url" in item:
            for ok, value in results:
                if ok:
                    item["front_image_path"] = value["path"]

        return item
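MysqlPipeline hard-codes its connection parameters. A common Scrapy pattern is to read them from settings.py through a from_crawler classmethod instead; below is a minimal sketch of that variant (the MYSQL_* setting names are my own and would have to be added to settings.py, they are not part of this project):

import MySQLdb

class MysqlSettingsPipeline(object):
    """Same behaviour as MysqlPipeline above, but the connection details come from settings."""

    def __init__(self, host, user, password, db):
        self.connect = MySQLdb.connect(host, user, password, db, charset='utf8', use_unicode=True)
        self.cursor = self.connect.cursor()

    @classmethod
    def from_crawler(cls, crawler):
        s = crawler.settings
        return cls(s.get("MYSQL_HOST", "127.0.0.1"), s.get("MYSQL_USER", "root"),
                   s.get("MYSQL_PASSWORD", ""), s.get("MYSQL_DB", "spider"))

    def process_item(self, item, spider):
        self.cursor.execute("insert into spiders(content) values(%s)",
                            (item.get("news_title", ""),))
        self.connect.commit()
        return item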

Finally, the settings.py file

# -*- coding: utf-8 -*-
import os
# Scrapy settings for spiderBaby project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'spiderBaby'

SPIDER_MODULES = ['spiderBaby.spiders']
NEWSPIDER_MODULE = 'spiderBaby.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'spiderBaby (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'spiderBaby.middlewares.SpiderbabySpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'spiderBaby.middlewares.SpiderbabyDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # Enable image downloading first, then the export/storage pipelines
    'spiderBaby.pipelines.ArticleImagePipeline': 1,
    'spiderBaby.pipelines.JsonWithEncodingPipeline': 2,
    'spiderBaby.pipelines.MysqlPipeline': 3,
    'spiderBaby.pipelines.SpiderbabyPipeline': 300,
}
IMAGES_URLS_FIELD = "front_image_url"
project_dir = os.path.dirname(os.path.abspath(__file__))
IMAGES_STORE = os.path.join(project_dir, 'images')
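# Note: the ArticleImagePipeline (ImagesPipeline) above requires Pillow to be installed.
# A few optional image settings Scrapy also supports, shown commented out in the same
# style as the defaults in this file (they are not part of the original project):
#IMAGES_EXPIRES = 90        # days before a downloaded image is considered stale
#IMAGES_MIN_HEIGHT = 100    # skip images smaller than this
#IMAGES_MIN_WIDTH = 100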
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
