Crawling the Renren website (cookie-based login)

# Helper module from Day 1 (a local file); only the commented-out post() call
# further below uses it, so the import is disabled here:
# from Day1.tuozhan_all import post, get
import json
from urllib import request, parse

# Saving cookies
from http import cookiejar
# A CookieJar object stores cookies in memory
cookie_object = cookiejar.CookieJar()
# A handler wraps one kind of processing; this one handles cookies
handler = request.HTTPCookieProcessor(cookie_object)
# When a response carries a Set-Cookie header, the opener calls into the
# handler, which stores the cookie in cookie_object
opener = request.build_opener(handler)
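
# Aside: CookieJar keeps cookies only in memory. http.cookiejar also provides
# MozillaCookieJar, which can save cookies to / load them from a file, so a
# login can be reused across runs. A minimal sketch ('cookies.txt' is an
# arbitrary filename, not part of the original script):
# persistent_jar = cookiejar.MozillaCookieJar('cookies.txt')
# handler = request.HTTPCookieProcessor(persistent_jar)
# ... log in through an opener built on this handler ...
# persistent_jar.save()   # write the captured cookies to disk
# persistent_jar.load()   # reload them in a later run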

# Conceptually, the handler does something like this (pseudocode):
# def store(cookie):
#     cookie_object.append(cookie)
#
# def check_response(response):
#     if 'cookie' in response:
#         handler.store(response['cookie'])


# url
url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2018721441132'
# Login form data, captured from the browser's login request. The password
# and rkey values appear to be pre-hashed/encrypted by the page's JavaScript,
# and 'f' is already URL-encoded.
form = {
    'email': '18510556963',
    'icode': '',
    'origURL': 'http://www.renren.com/home',
    'domain': 'renren.com',
    'key_id': '1',
    'captcha_type': 'web_login',
    'password': '95cb2a1d59b918e0d16ab5d3535fb40103e4b546e651a3e3c99b91876927c78a',
    'rkey': 'a7bccfbafd7ee702247450942dff5611',
    'f': 'http%3A%2F%2Fwww.renren.com%2F966927992',
}
# post
# urlencode serializes the form dict into a query string ('a=1&b=2');
# encode('utf-8') then converts that string to the bytes that open() expects
form_bytes = parse.urlencode(form).encode('utf-8')
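# For example (a sketch with shortened values):
#   parse.urlencode({'email': 'a@b.com', 'key_id': '1'})
#   -> 'email=a%40b.com&key_id=1'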

# Without the cookie-aware opener the POST would still work, but the
# session cookie in the response would not be kept:
# response = request.urlopen(url, form_bytes)

# Send the login request; on success open() returns a response object
response = opener.open(url, form_bytes)
# Read the response body into html_bytes
html_bytes = response.read()
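# At this point cookie_object holds whatever cookies the server set.
# CookieJar is iterable, so the captured cookies can be inspected
# (a debugging sketch, not part of the original flow):
# for c in cookie_object:
#     print(c.name, c.value, c.domain)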
# html_bytes = post(url, form=form)  # equivalent call via the Day1 helper
# Print the raw result
# print(html_bytes)
# Parse the JSON response into a dict
res_dict = json.loads(html_bytes.decode('utf-8'))
# Get the URL of the logged-in home page
home_url = res_dict['homeUrl']

# Visit the home page; the opener automatically attaches the stored cookies
response = opener.open(home_url)
# Read the logged-in page
html_bytes = response.read()
# Print the page content
print(html_bytes.decode('utf-8'))
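
For comparison, the same login flow is much shorter with the third-party requests library, whose Session object stores and resends cookies automatically. A minimal sketch, assuming requests is installed and reusing the url and form defined above:

import requests

session = requests.Session()           # a Session keeps cookies across requests
resp = session.post(url, data=form)    # cookies set by the login response are stored
home_url = resp.json()['homeUrl']      # same JSON field as above
print(session.get(home_url).text)      # stored cookies are sent automatically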
