1. Image download
# Download an image
import requests

response = requests.get('http://g.hiphotos.baidu.com/image/pic/item/5366d0160924ab18014cefd83bfae6cd7a890b82.jpg')
# Get the response body as bytes
data = response.content
with open('python.jpg', 'wb') as f:  # the URL serves a JPEG, so save it with a .jpg extension
    f.write(data)
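The snippet above saves whatever comes back, even an error page. A slightly more defensive sketch of the same download checks the status code and streams the body in chunks, so a large file is never held fully in memory:

import requests

url = 'http://g.hiphotos.baidu.com/image/pic/item/5366d0160924ab18014cefd83bfae6cd7a890b82.jpg'
response = requests.get(url, stream=True)
response.raise_for_status()  # raise on 4xx/5xx instead of silently saving an error page
with open('python.jpg', 'wb') as f:
    for chunk in response.iter_content(chunk_size=8192):  # write the body piece by piece
        f.write(chunk)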
2. Example: a spider for the Douban Top 250 movie chart
import os
import requests
# from lxml import etree  # parses string-typed HTML; this direct import fails on some lxml builds,
# so reach etree through lxml.html instead:
from lxml import html
etree = html.etree
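etree.HTML(), used below, builds an XPath-queryable parse tree and automatically repairs malformed markup, while etree.tostring() serializes the repaired tree back out as bytes. A minimal standalone sketch of that behaviour:

# etree.HTML() closes unclosed tags while parsing
broken = '<ul><li>one<li>two'
tree = etree.HTML(broken)
print(etree.tostring(tree))       # the missing </li> and </ul> are closed, wrapped in <html><body>
print(tree.xpath('//li/text()'))  # ['one', 'two']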
def spider_douban_top250():
    movie_list_info = []
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"}
    for i in range(0, 250, 25):  # pagination: start runs 0, 25, ..., 225, with 25 movies per page
        url = 'https://movie.douban.com/top250?start={}&filter='.format(i)  # substitute i into the {}
        data = requests.get(url, headers=headers).content
        # etree.HTML() builds an XPath-queryable tree and automatically repairs the HTML;
        # etree.tostring() would output the repaired markup as bytes.
        # (Named selector so it does not shadow the lxml.html module imported above.)
        selector = etree.HTML(data)
        ol_list = selector.xpath('//div[@id="content"]//div[@class="article"]/ol/li')
        for movie in ol_list:
            # Rank number (serial_number)
            serial_number = movie.xpath('./div[@class="item"]/div[@class="pic"]/em/text()')
            serial_number = serial_number[0] if serial_number else ''
            # print(serial_number)
            # Movie title: [0] takes the first matched text node, i.e. the main title
            movie_name = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="hd"]/a/span[@class="title"]/text()')[0]
            # print(movie_name)
            # Movie details (director, cast, year)
            movie_introduce = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/p[1]/text()')[0].strip()
            # print(movie_introduce)
            # Star rating
            star = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/div[@class="star"]/span[2]/text()')[0]
            # print(star)
            # Number of ratings; strip the '人评价' ("people rated") suffix
            evaluate = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/div[@class="star"]/span[4]/text()')
            evaluate = evaluate[0].replace('人评价', '')
            # print(evaluate)
            # One-line quote; some entries have none, so fall back to ''
            describe = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/p[@class="quote"]/span[1]/text()')
            describe = describe[0] if describe else ''
            # print(describe)
            # Poster image URL: [0] so it is a string rather than a one-element list
            movie_img_url = movie.xpath('./div[@class="item"]/div[@class="pic"]/a/img/@src')[0]
            print(movie_img_url)
            movie_list_info.append({
                'serial_number': serial_number,
                'movie_name': movie_name,
                'movie_introduce': movie_introduce,
                'star': star,
                'evaluate': evaluate,
                'describe': describe,
                'movie_img_url': movie_img_url
            })
    # for movie in movie_list_info:
    #     print(movie)
    # Download the poster images
    os.makedirs('./imgs', exist_ok=True)  # make sure the target directory exists
    for movie in movie_list_info:
        url = movie['movie_img_url']
        resp = requests.get(url)
        if resp.status_code == 200:
            img_name = '{}.jpg'.format(movie['serial_number'].zfill(8))  # zero-pad to a uniform width
            with open('./imgs/{}'.format(img_name), 'wb') as f:
                f.write(resp.content)

spider_douban_top250()
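The scraped records only live inside the function above. If you also want them on disk, one option is to add `return movie_list_info` at the end of spider_douban_top250() and dump the result to JSON; a minimal sketch (the file name douban_top250.json is an arbitrary choice):

import json

movies = spider_douban_top250()  # assumes the function now ends with `return movie_list_info`
with open('douban_top250.json', 'w', encoding='utf-8') as f:
    json.dump(movies, f, ensure_ascii=False, indent=2)  # ensure_ascii=False keeps the Chinese titles readable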
3. A 12306 train ticket spider
import requests
import re  # regular expressions; re.findall() is used below to pull out every match in a string

# Suppress the warning that urllib3 emits when HTTPS certificate verification is disabled
requests.packages.urllib3.disable_warnings()
def getStation():
    # URL of the 12306 JS file that maps station names to station codes
    url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9018'
    r = requests.get(url, verify=False)  # verify=False skips the SSL certificate check
    pattern = r'([\u4e00-\u9fa5]+)\|([A-Z]+)'  # a run of Chinese characters, '|', then an upper-case code
    result = re.findall(pattern, r.text)  # re.findall() returns every match of pattern in the string, as a list of tuples
    station = dict(result)  # e.g. {'北京北': 'VAP', '北京东': 'BOP', '北京': 'BJP', ...}
    # print(station)
    return station
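To make the regex concrete, here is what it extracts from a fragment written in the same `@abbr|name|CODE|pinyin|abbr|index` layout as station_name.js (the fragment itself is made up for illustration):

sample = '@bjb|北京北|VAP|beijingbei|bjb|0@bjd|北京东|BOP|beijingdong|bjd|1'
pairs = re.findall(r'([\u4e00-\u9fa5]+)\|([A-Z]+)', sample)
print(pairs)        # [('北京北', 'VAP'), ('北京东', 'BOP')]
print(dict(pairs))  # {'北京北': 'VAP', '北京东': 'BOP'} -> the {name: code} lookup table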
'''
Query train ticket information between two stations.
Input parameters: departure date, departure station, destination station.
12306 API:
'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=2017-07-18&leftTicketDTO.from_station=NJH&leftTicketDTO.to_station=SZH&purpose_codes=ADULT'
'''
# Build the query URL
def get_query_url(text):
    # text is the station-name-to-code lookup dictionary
    # key: station name  value: station code
    try:
        # date = '2019-08-03'
        # from_station_name = '上海'
        # to_station_name = '北京'
        date = input('Enter the departure date, e.g. 2019-08-03: ')
        from_station_name = input('Enter the departure station: ')
        to_station_name = input('Enter the destination station: ')
        # Convert the station names to station codes
        from_station = text[from_station_name]
        to_station = text[to_station_name]
    except KeyError:
        date, from_station, to_station = '--', '--', '--'
    # Assemble the API URL
    url = (
        'https://kyfw.12306.cn/otn/leftTicket/query?'
        'leftTicketDTO.train_date={}&'
        'leftTicketDTO.from_station={}&'
        'leftTicketDTO.to_station={}&'
        'purpose_codes=ADULT'
    ).format(date, from_station, to_station)
    print(url)
    return url
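As an aside, the same URL can be produced by letting requests encode the query string from a params dict instead of formatting it by hand; a sketch with sample values (the station codes shown are examples; in practice look them up via getStation()):

params = {
    'leftTicketDTO.train_date': '2019-08-03',
    'leftTicketDTO.from_station': 'SHH',  # sample code for 上海
    'leftTicketDTO.to_station': 'BJP',    # sample code for 北京
    'purpose_codes': 'ADULT',
}
r = requests.get('https://kyfw.12306.cn/otn/leftTicket/query', params=params, verify=False)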
# Fetch and print the ticket information
def query_train_info(url, text):
    '''
    Query the train ticket information and return it as a list of formatted strings.
    '''
    info_list = []
    # Reverse dictionary so the station codes in the response can be shown as names again
    code_to_name = {code: name for name, code in text.items()}
    try:
        r = requests.get(url, verify=False)
        # Take the "result" list out of the "data" field of the returned JSON
        raw_trains = r.json()['data']['result']
        for raw_train in raw_trains:
            # Each train is one '|'-separated record
            data_list = raw_train.split('|')
            # Train number
            train_no = data_list[3]
            # Departure station
            from_station = code_to_name.get(data_list[6], data_list[6])
            # Destination station
            to_station = code_to_name.get(data_list[7], data_list[7])
            # Departure time
            start_time = data_list[8]
            # Arrival time
            arrive_time = data_list[9]
            # Total travel time
            duration = data_list[10]
            # First-class seats
            first_class_seat = data_list[31] or '--'
            # Second-class seats
            second_class_seat = data_list[30] or '--'
            # Soft sleeper
            soft_sleep = data_list[23] or '--'
            # Hard sleeper
            hard_sleep = data_list[28] or '--'
            # Hard seat
            hard_seat = data_list[29] or '--'
            # Standing ticket
            no_seat = data_list[26] or '--'
            # Format and print the query result
            info = (
                'Train: {}\nFrom: {}\nTo: {}\nDeparts: {}\nArrives: {}\nDuration: {}\n'
                'Seats:\n first class: 「{}」\n second class: 「{}」\n soft sleeper: 「{}」\n'
                ' hard sleeper: 「{}」\n hard seat: 「{}」\n standing: 「{}」\n\n'.format(
                    train_no, from_station, to_station, start_time, arrive_time, duration,
                    first_class_seat, second_class_seat, soft_sleep, hard_sleep, hard_seat, no_seat))
            print(info)
            info_list.append(info)
        return info_list
    except Exception:
        return 'The query failed; please check your input and try again.'

text = getStation()
url = get_query_url(text)
query_train_info(url, text)