爬虫学习笔记--爬取静态网页

声明:这里是学习唐松老师的《Python网络爬虫从入门到实践》的学习笔记,只是记录我自己的学习过程,详细内容请购买老师的正版图书。


# Basic GET request: fetch the homepage and inspect the Response object.
import requests

r = requests.get('http://www.santostang.com/')
# BUG FIX: the original label said "文本状态码" (text status code) but the value
# printed is r.encoding, i.e. the detected text encoding — label corrected.
print("文本编码:", r.encoding)
print("响应状态码:", r.status_code)   # HTTP status code, e.g. 200
print("字符串响应体:", r.text)        # body decoded to str using r.encoding
# print("字节响应体:", r.content)     # raw bytes body
# print("json解码器:", r.json())      # parse body as JSON (raises if not JSON)


# Passing URL parameters: requests encodes the dict into the query string.
import requests

query = {'key1': 'value1', 'key2': 'value2'}
resp = requests.get('http://httpbin.org/get', params=query)
print("URL 编码", resp.url)       # final URL with ?key1=value1&key2=value2
print("响应体:\n", resp.text)



# Custom request headers: spoof a browser User-Agent and pin the Host header.
import requests

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
    # BUG FIX: domain was misspelled "santosang.com"; every other request in
    # this file targets www.santostang.com, so the typo hit the wrong host.
    'Host': 'www.santostang.com'
}
r = requests.get("http://www.santostang.com/", headers=headers)
print("响应状态码:", r.status_code)



# POST request: the dict is form-encoded and sent as the request body.
import requests

form_data = {'key1': 'value1', 'key2': 'value2'}
response = requests.post("http://httpbin.org/post", data=form_data)
print(response.text)



# Timeout: abort with requests.exceptions.Timeout if no response within 20 s.
import requests

url = "http://www.santostang.com/"
response = requests.get(url, timeout=20)

#豆瓣Top250
import requests
from bs4 import BeautifulSoup


def get_movie():
    """Scrape movie titles from the Douban Top 250 list.

    Walks all 10 result pages (25 movies per page) and collects each
    movie's title text.

    Returns:
        list[str]: the movie titles in ranking order (~250 entries).
    """
    movie_list = []
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
        'Host': 'movie.douban.com'
    }
    for i in range(10):
        # 'start' is the 0-based offset of the first movie on the page.
        link = 'https://movie.douban.com/top250?start=' + str(i * 25)
        r = requests.get(link, headers=headers, timeout=10)
        print(str(i+1)+"页的响应状态码", r.status_code)

        soup = BeautifulSoup(r.text, "lxml")
        # Each <div class="hd"> holds one movie's heading with its title.
        div_list = soup.find_all('div', class_='hd')
        for each in div_list:
            movie = each.a.span.text.strip()
            movie_list.append(movie)
    # BUG FIX: the return was indented inside the page loop, so the function
    # returned after page 1 and only ever collected the first 25 titles.
    return movie_list

if __name__ == "__main__":
    a = get_movie()
    print(a)






你可能感兴趣的:(爬虫学习笔记)