Selenium + Scrapy
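Selenium and Scrapy are combined here to crawl news.163.com: the news lists on the section pages are loaded dynamically, so a plain Scrapy download does not contain them. The spider therefore keeps one Chrome instance, and a downloader middleware intercepts the responses for those section URLs and replaces them with an HtmlResponse built from the rendered page_source; everything else (title extraction, detail pages, items) stays ordinary Scrapy.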

spider.py

# -*- coding: utf-8 -*-
import scrapy

from wy.items import WyItem

from selenium import webdriver


class WySpiderSpider(scrapy.Spider):
    name = 'wy_spider'
    # allowed_domains = ['news.163.com']
    start_urls = ['https://news.163.com/']
    # Holds the links of the news sections that have to be rendered with Selenium
    urls = []

    # Instantiate the browser once for the whole spider
    # (Selenium 3 style; with Selenium 4 the driver path is passed via a Service object)
    path = r'D:\chromedriver\chromedriver.exe'
    bro = webdriver.Chrome(executable_path=path)

    def parse(self, response):
        li_list = response.xpath("//div[@class='bd']//div[@class='ns_area list']/ul/li")

        for index in [3, 4, 7, 8]:
            li = li_list[index]
            link = li.xpath(".//a/@href").get()
            self.urls.append(link)
            # Send a request for each selected section URL; the middleware below intercepts these responses
            yield scrapy.Request(url=link, callback=self.parse_news)

    # Parses the news list of each section page;
    # the article title and the detail-page link are extracted here
    def parse_news(self, response):

        div_list = response.xpath("//div[@class='ndi_main']//div")
        for div in div_list:
            item = WyItem()

            url = div.xpath(".//div[@class='news_title']/h3/a/@href").get()
            print("*" * 50)
            print(url)
            print("*" * 50)
            title = div.xpath(".//div[@class='news_title']/h3/a/text()").get()
            print(title)
            item['title'] = title
            if url:
                yield scrapy.Request(url=url, callback=self.parse_news_detail, meta={'item': item})
            else:
                # Some divs under ndi_main carry no news link; skip them instead of
                # ending the whole generator with return
                continue

    # Parses the detail page of each news article
    def parse_news_detail(self, response):
        item = response.meta['item']
        # Join the scattered text nodes of the article body into a single string
        content = ''.join(response.xpath("//div[@id='endText']//text()").extract()).strip()
        print(content)
        item['desc'] = content
        yield item

    # Quit the browser once the whole spider has finished
    def closed(self, reason):
        print("spider finished, quitting the browser...")
        self.bro.quit()

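The spider imports WyItem from wy.items and fills exactly two fields, title and desc. A minimal items.py sketch matching those fields (assuming nothing else needs to be stored):

items.py

# -*- coding: utf-8 -*-
import scrapy


class WyItem(scrapy.Item):
    # Headline extracted from the section list page
    title = scrapy.Field()
    # Full article text extracted from the detail page
    desc = scrapy.Field()
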
middlewares.py

from scrapy.http import HtmlResponse

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

import random
import time


class WyDownloaderMiddleware(object):
    user_agent = [
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
        "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
        "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
        "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
        "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
        "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
        "UCWEB7.0.2.37/28/999",
        "NOKIA5700/ UCWEB7.0.2.37/28/999",
        "Openwave/ UCWEB7.0.2.37/28/999",
        "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
        # iPhone 6:
        "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
    ]

    def process_request(self, request, spider):
        # UA spoofing: pick a random User-Agent for every outgoing request
        request.headers['User-Agent'] = random.choice(self.user_agent)
        return None

    # Intercept every response produced in the project
    def process_response(self, request, response, spider):
        if request.url in spider.urls:
            # Only the section-page responses are replaced, because their
            # news lists are loaded dynamically and the plain download misses them

            # Reuse the browser object created in the spider class
            bro = spider.bro
            bro.get(url=request.url)
            WebDriverWait(bro, 20).until(
                EC.presence_of_all_elements_located((By.CLASS_NAME, 'load_more_tip'))
            )

            # Keep clicking "load more" until the button is hidden (its style becomes "display: none;")
            while bro.find_element_by_class_name('load_more_btn').get_attribute("style") != "display: none;":
                bro.find_element_by_class_name('load_more_btn').click()
                time.sleep(0.5)  # give the page a moment to append the new entries

            page_text = bro.page_source
            # Build a new response object from the fully rendered page source
            new_response = HtmlResponse(url=request.url, body=page_text, encoding="utf-8", request=request)
            return new_response
        else:
            return response

    def process_exception(self, request, exception, spider):
        # No special handling; fall back to Scrapy's default exception behaviour
        pass
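
The middleware is only applied if it is registered in settings.py. A minimal sketch of the relevant settings, assuming the Scrapy project is named wy and using the project template's default priority of 543; ROBOTSTXT_OBEY and LOG_LEVEL are the usual tweaks for this kind of crawl:

settings.py

BOT_NAME = 'wy'

SPIDER_MODULES = ['wy.spiders']
NEWSPIDER_MODULE = 'wy.spiders'

# Commonly disabled in tutorials like this one; check the site's robots.txt and terms before crawling
ROBOTSTXT_OBEY = False

# Keep the console readable while the debug prints are in place
LOG_LEVEL = 'ERROR'

DOWNLOADER_MIDDLEWARES = {
    'wy.middlewares.WyDownloaderMiddleware': 543,
}

The crawl is then started with scrapy crawl wy_spider: Chrome opens once when the spider class is loaded, the middleware reuses it for every section page, and closed() quits it when the run ends.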
