# 豆瓣爬取 — Douban TV-listing scraper

import json
import os

import requests

class Douban(object):
    """Scrape Douban's mobile API for American TV-show listings.

    Fetches the paginated JSON endpoint 18 items at a time and saves each
    page, pretty-printed, to ``./movies/第N页.txt``.
    """

    def __init__(self):
        # Paginated API endpoint; {} is filled with the `start` offset.
        # Original full Request URL for reference:
        # https://m.douban.com/rexxar/api/v2/subject_collection/tv_american/items?os=ios&for_mobile=1&callback=jsonp1&start=0&count=18&loc_id=108288&_=0
        self.url_list = "https://m.douban.com/rexxar/api/v2/subject_collection/tv_american/items?start={}&count=18"
        # Request headers: the Referer is required to get past Douban's
        # anti-scraping check; the User-Agent mimics a mobile browser.
        self.headers = {
            "Referer": "https://m.douban.com/tv/american",
            "User-Agent": "Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Mobile Safari/537.36",
        }

    def run(self):
        """Fetch page 1, read the total item count, then fetch and save
        every remaining page."""
        # 1. Request the first page (start=0) and save it as page 1.
        json_str = self.parse_get(self.url_list.format(0))
        self.save_content(1, json_str)
        # 2. The first response carries the total number of items.
        total = self.get_total(json_str)
        # 3. Fetch each remaining page; enumerate from 2 replaces the
        #    original O(n) (and duplicate-unsafe) list.index() lookup.
        for page_index, url in enumerate(self.get_url_list(total), start=2):
            json_str = self.parse_get(url)
            self.save_content(page_index, json_str)

    def parse_get(self, url):
        """GET `url` with the scraper headers; return the decoded body text."""
        response = requests.get(url=url, headers=self.headers)
        return response.content.decode()

    def save_content(self, page_index, json_str):
        """Pretty-print the JSON response to ./movies/第{page_index}页.txt."""
        ret_dict = json.loads(json_str)
        # Create the output directory on first use instead of crashing
        # with FileNotFoundError when ./movies does not exist yet.
        os.makedirs("./movies", exist_ok=True)
        with open("./movies/第{}页.txt".format(page_index), "w", encoding="utf-8") as f:
            f.write(json.dumps(ret_dict, ensure_ascii=False, indent=4))

        print("保存第{}页".format(page_index))

    def get_total(self, json_str):
        """Return the `total` item count from a JSON response string."""
        ret_dict = json.loads(json_str)
        return ret_dict["total"]

    def get_url_list(self, total):
        """Build the URLs for pages 2..N (start offsets 18, 36, ...)
        given `total` items at 18 per page."""
        total_page = total // 18 if total % 18 == 0 else total // 18 + 1
        return [self.url_list.format(i * 18) for i in range(1, total_page)]

# Script entry point: run the scraper only when executed directly.
# (Original used bare `name == ‘main’` with smart quotes — a syntax error.)
if __name__ == "__main__":
    Douban().run()

# 你可能感兴趣的: (爬虫)  — blog footer: "You may also be interested in: (web scraping)"