Ajax

ajax的get请求

获取豆瓣喜剧电影排行榜第一页的数据

import urllib.request

# JSON endpoint behind the Douban comedy-chart page's Ajax call (first page:
# start=0, 20 items per page).
url = 'https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&start=0&limit=20'

# A browser User-Agent so the server does not reject the scripted request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}

request = urllib.request.Request(url=url, headers=headers)

response = urllib.request.urlopen(request)

content = response.read().decode('utf-8')

# print(content)
# Save the data locally.
# open() defaults to the platform encoding (gbk on Chinese Windows), so pass
# encoding='utf-8' explicitly to store the Chinese text correctly.
# Use a context manager so the file is always flushed and closed — the
# original left the handle open.
with open('douban.json', 'w', encoding='utf-8') as fp:
    fp.write(content)

获取豆瓣喜剧电影排行榜任何几页的数据

第一页:https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&start=0&limit=20
第二页:https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&start=20&limit=20
第三页:https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&start=40&limit=20

由上面前三个网页可以得到网页的规律为

page   1   2   3   4
start  0   20  40  60
start= (page-1)*20
所以可以利用循环去获取想要页数的网页数据

代码如下

import urllib.request
import urllib.parse


# 定义多个函数用来降低耦合度
def create_request(page):
    """Build a GET Request for one page of the Douban comedy chart.

    Pagination rule: start = (page - 1) * 20, 20 items per page.
    """
    base_url = 'https://movie.douban.com/j/chart/top_list?type=24&interval_id=100%3A90&action=&'
    # Encode the paging parameters and append them to the base URL.
    query = urllib.parse.urlencode({
        'start': (page - 1) * 20,
        'limit': 20,
    })
    ua_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
    }
    return urllib.request.Request(url=base_url + query, headers=ua_headers)

def get_content(request):
    """Send *request* and return the response body decoded as UTF-8.

    Uses a context manager so the HTTP response is always closed —
    the original version leaked the connection.
    """
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')
# 下载
def download(page, content):
    """Write *content* to douban_<page>.json, UTF-8 encoded."""
    filename = 'douban_{}.json'.format(page)
    with open(filename, 'w', encoding='utf-8') as out_file:
        out_file.write(content)

if __name__ == '__main__':
    # Ask the user for an inclusive page range, then fetch and save each page.
    start_page = int(input('请输入开始的页数'))
    end_page = int(input('请输入结束的页数'))

    for current in range(start_page, end_page + 1):
        req = create_request(current)
        body = get_content(req)
        download(current, body)
ajax的post请求

获取肯德基餐厅地址的数据,如图

(原文此处为浏览器开发者工具的截图,展示肯德基门店查询接口的 Ajax 请求参数)

主要就是page变量

import urllib.request
import urllib.parse


def create_request(page):
    """Build a POST Request for one page of KFC stores in Zhengzhou.

    The store-list endpoint pages through results via the pageIndex
    form field; the form body is sent URL-encoded.
    """
    base_url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=cname'
    ua_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
    }
    # POST data must be bytes, hence the encode() after urlencode().
    form = urllib.parse.urlencode({
        'cname': '郑州',
        'pid': '',
        'pageIndex': page,
        'pageSize': '10',
    }).encode('utf-8')
    return urllib.request.Request(url=base_url, data=form, headers=ua_headers)

def get_content(request):
    """Send *request* and return the response body decoded as UTF-8.

    Uses a context manager so the HTTP response is always closed —
    the original version leaked the connection.
    """
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')

def download(page, content):
    """Write *content* to kfc_<page>.json, UTF-8 encoded."""
    filename = 'kfc_{}.json'.format(page)
    with open(filename, 'w', encoding='utf-8') as out_file:
        out_file.write(content)


if __name__ == '__main__':
    # Ask the user for an inclusive page range, then fetch and save each page.
    start_page = int(input('请输入开始的页数'))
    end_page = int(input('请输入结束的页数'))

    for current in range(start_page, end_page + 1):
        req = create_request(current)
        body = get_content(req)
        download(current, body)



你可能感兴趣的:(Python爬虫,ajax)