Zhihu Data Scraping and Storage

import requests
from pymysql_conn import Mysql_connect

url = 'https://www.zhihu.com/api/v4/members/leedaye/answers?include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Cmark_infos%2Ccreated_time%2Cupdated_time%2Creview_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cvoting%2Cis_author%2Cis_thanked%2Cis_nothelp%3Bdata%5B*%5D.author.badge%5B%3F(type%3Dbest_answerer)%5D.topics&offset={}&limit=20&sort_by=created'
# After inspecting all of the page's requests, the URL above was found to return
# the required data directly as JSON. It takes two query parameters, offset and
# limit: offset controls the index of the first item and limit controls how many
# items are returned per request. Testing shows offset can be set freely; limit
# was not tested, so the value of 20 used by the web page is kept. This script
# only scrapes the first 3 pages.


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36'
}
# Prepare the SQL statement; the three placeholders are filled in below
sql = 'insert into data_zhihu VALUES ({},{},{})'
# Create the database connection object
mysql_object = Mysql_connect()

for i in range(0, 60, 20):
    # Fetch the JSON for each of the first 3 pages (offset 0, 20, 40)
    response = requests.get(url.format(i), headers=headers)
    data_json = response.json()
    # Loop over every answer on the page and write it to the database
    # (iterating over the returned list avoids an IndexError if a page has fewer than 20 answers)
    for answer in data_json['data']:
        question_id = answer['question']['id']
        title = answer['question']['title']
        # Strip the surrounding <p> ... </p> tags from the answer body
        content = answer['content'][3:-4]
        # print(question_id, title, content)
        mysql_object.execute_sql(sql.format(repr(question_id), repr(title), repr(content)))
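The script imports Mysql_connect from a local pymysql_conn module that is not shown in the post. A minimal sketch of what such a wrapper could look like, assuming it is built on pymysql; the connection settings (host, user, password, database name) below are placeholders, not values from the original post:

import pymysql


class Mysql_connect(object):
    def __init__(self):
        # Open a single connection; utf8mb4 avoids issues with emoji in answer text
        self.conn = pymysql.connect(
            host='localhost',          # placeholder
            user='root',               # placeholder
            password='your_password',  # placeholder
            database='zhihu',          # placeholder
            charset='utf8mb4'
        )

    def execute_sql(self, sql):
        # Execute one statement and commit immediately, matching how the main script calls it
        with self.conn.cursor() as cursor:
            cursor.execute(sql)
        self.conn.commit()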

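One more note on the INSERT: the target table data_zhihu must already exist with three columns matching the inserted values (a question id, a title and the answer content). Formatting repr() of each value into the SQL string works for simple strings but is brittle (quoting edge cases, SQL injection). If the underlying pymysql connection is accessible, parameter binding with %s placeholders is the safer pattern; a sketch, assuming conn is the connection held by Mysql_connect:

# Safer variant: let pymysql handle quoting and escaping via %s placeholders
insert_sql = 'INSERT INTO data_zhihu VALUES (%s, %s, %s)'
with conn.cursor() as cursor:
    cursor.execute(insert_sql, (question_id, title, content))
conn.commit()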