爬虫

import requests

from lxml import etree

from threading import Thread

import time

from queue import Queue

# --- Single-threaded crawl of the Douban Top 250 list ---

base_url = 'https://movie.douban.com/top250?filter=&start='

movie_list = []

for page in range(10):
    # Each results page holds 25 movies; the offset selects the page.
    page_url = base_url + str(page * 25)

    resp = requests.get(url=page_url)
    # Decode the raw body explicitly as UTF-8.
    html_text = resp.content.decode('utf-8')

    tree = etree.HTML(html_text)

    # Every movie card sits in a <div class="item">.
    for node in tree.xpath('//div[@class="item"]'):
        record = {
            '编号': node.xpath('./div[1]/em/text()')[0],
            '名称': node.xpath('./div[2]//a/span[1]/text()')[0],
            '评分': node.xpath('./div[2]//span[@class="rating_num"]/text()')[0],
        }
        movie_list.append(record)

print(len(movie_list))

#多线程

def douban(url1):
    """Fetch one Top-250 results page and push each movie dict onto the
    shared queue (``movie_list`` is expected to be a ``queue.Queue``)."""
    resp = requests.get(url=url1)
    html_text = resp.content.decode('utf-8')

    tree = etree.HTML(html_text)

    for node in tree.xpath('//div[@class="item"]'):
        record = {}
        record['编号'] = node.xpath('./div[1]/em/text()')[0]
        record['名称'] = node.xpath('./div[2]//a/span[1]/text()')[0]
        record['评分'] = node.xpath('./div[2]//span[@class="rating_num"]/text()')[0]
        # queue.Queue locks internally, so concurrent puts from many
        # threads cannot corrupt the stored data.
        movie_list.put(record)

if __name__ == '__main__':
    # --- Multi-threaded crawl: one worker thread per results page ---
    start_time = time.time()

    # Thread-safe queue shared with douban(); it must NOT be rebound later.
    # BUG FIX: the original re-assigned `movie_list = []` right after this,
    # replacing the Queue with a plain list — every worker then crashed on
    # movie_list.put(), and the final movie_list.empty() below raised
    # AttributeError. The rebinding is removed.
    movie_list = Queue()

    base_url = 'https://movie.douban.com/top250?filter=&start='

    thread_list = []

    for i in range(10):
        url1 = base_url + str(i * 25)
        thread_spider = Thread(target=douban, args=[url1])
        thread_list.append(thread_spider)
        thread_spider.start()

    # Join every worker so the main thread doesn't run past them
    # before the results are in.
    for thread_one in thread_list:
        thread_one.join()

    end_time = time.time()

    # Drain and display the collected movies.
    while not movie_list.empty():
        print(movie_list.get())

    print('爬取时间总结:%s' % (end_time - start_time))

#利用协程爬取数据

import requests

import json

import time

from lxml import etree

from threading import Thread

from queue import Queue

from gevent import  monkey

import gevent

import random

# gevent lets us write asynchronous programs in a synchronous style.
# monkey.patch_all() dynamically swaps out the blocking network libraries
# (socket, select, thread) at runtime for asynchronous versions, so the
# program handles network tasks cooperatively.
monkey.patch_all()

def spider(url):
    """Fetch one Top-250 page through a randomly chosen HTTP proxy and
    push each movie dict onto the shared ``movie_list`` queue.

    Relies on the module-level globals ``headers``, ``proxies_ips`` and
    ``movie_list`` being set up by the caller.
    """
    resp = requests.get(
        url=url,
        headers=headers,
        proxies={'http': random.choice(proxies_ips)},
    )
    page_html = resp.content.decode('utf-8')

    # Parse out every movie card on the page.
    tree = etree.HTML(page_html)
    for node in tree.xpath('//div[@class="item"]'):
        record = {}
        record['编号'] = node.xpath('./div[1]/em/text()')[0]
        print(record['编号'])
        record['名称'] = node.xpath('./div[2]//a/span[1]/text()')[0]
        record['评分'] = node.xpath('./div[2]//span[@class="rating_num"]/text()')[0]
        movie_list.put(record)

if __name__ == '__main__':
    # Pool of HTTP proxies; spider() picks one at random per request.
    proxies_ips = [
        'http://122.114.31.177:808',
        'http://61.135.217.7:80',
        'http://14.20.180.209:8118',
    ]

    # Browser-like headers so Douban serves the normal page.
    headers = {
        'Host': 'movie.douban.com',
        'Referer': 'https://accounts.douban.com/login?redir=https://movie.douban.com/top250?filter=&source=None',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
    }

    start_time = time.time()

    # Shared queue the spider() greenlets push results into.
    movie_list = Queue()

    '''发送请求'''
    base_url = 'https://movie.douban.com/top250?filter=&start='

    # Spawn one greenlet per results page, then wait for all of them.
    gevent_spiders = []
    for i in range(10):
        url = base_url + str(i * 25)
        print(url)
        gevent_spider = gevent.spawn(spider, url)
        gevent_spiders.append(gevent_spider)

    gevent.joinall(gevent_spiders)

    end_time = time.time()

    # CONSISTENCY FIX: mirror the threaded version — the original filled
    # the queue but never read the results back out.
    while not movie_list.empty():
        print(movie_list.get())

    print('爬取时间总结:%s' % (end_time - start_time))

你可能感兴趣的:(爬虫)