微博采集

成果

morelinks()###

注意里面的while和for循环
因为 i=i+1 写在 try 里面,所以请求出错时不会翻页,会一直重试同一个链接,直到成功为止。。。
for url in urls:多个账号
i=1,while i<9:抓取第 1 到 8 页

def result()###

用了json
也用了正则,排除一些无效的内容:
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
b = re.compile('<.*?>'
re.sub(b,'',cards[0]['card_group'][i]['mblog']['text']
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
web_date = requests.get(url2,timeout=3.0001)
timeout,有点意思。。。

很简单,所以不用读写分离

#!/usr/bin/env python
#-*- coding: utf-8 -*-
import requests
import time
import json
import re
from urllib import request
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
#https://zhidao.baidu.com/question/1637388888787200780.html?skiptype=2
#https://zhidao.baidu.com/question/326188178157901725.html?qbl=relate_question_2&skiptype=2
# Timestamp used in output filenames; ':' is illegal in Windows filenames,
# so replace it with '-'.
time1 = time.strftime("%H:%M:%S").replace(':','-')
path ='./result/'
# Weibo user ids to crawl.
ids =['2634877355','3957040489','1850988623','1477045392','1642292081','1974808274']
#ConnectionAbortedError: [WinError 10053] 您的主机中的软件中止了一个已建立的连接。
#('Connection aborted.', TimeoutError(10060, '由于连接方在一段时间后没有正确答复或连接的主机没有反应,连接尝试失败。', None, 10060, None))
#requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionAbortedError(10053, '您的主机中的软件中止了一个已建立的连接。', None, 10053, None))
# Mobile-site JSON endpoints, one per account; the page number is appended later.
urls= ['http://m.weibo.cn/page/json?containerid=100505{}_-_WEIBO_SECOND_PROFILE_WEIBO&page='.format(i)for i in ids]
headers = {
    'Cookie': '_T_WM=fb891da3dee671bfb37a045660512441; SUHB=0rrKYBXxJUK4-l; SCF=AiKs4RYjd4sGkYDERWq3Fw0Ro69PlySf72VxZ78iuPueo-VQ97U_W4_U1R2oj1BuXnsZbWChTDVfGZ28_3048Pw.; SUB=_2A251FAuWDeTxGedG6lQY8S3Pyz6IHXVW9pXerDV6PUJbkdBeLVbRkW1Ogg679NBeDJ6iXr9hNupHMsGDIA..; M_WEIBOCN_PARAMS=featurecode%3D20000181%26luicode%3D10000011%26lfid%3D1005051904769205',
    # BUG FIX: the key was 'User - Agent' (with spaces) and the value embedded a
    # literal 'User-Agent:' prefix, so no valid User-Agent header was ever sent.
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.76 Mobile Safari/537.36'
}
def morelinks():
    """Crawl pages 1-8 of every account endpoint in the module-level `urls`.

    A failed page is retried after a 10-second pause, but only up to
    `max_retries` times.  The original code only advanced the page counter
    inside the `try`, so one permanently broken page (e.g. a deleted
    account returning errors forever) would hang the whole crawl.
    """
    max_retries = 5
    for base_url in urls:  # one base URL per account
        page = 1
        attempts = 0
        while page < 9:  # pages 1 through 8
            page_url = base_url + str(page)
            try:
                result(page_url)
                print(page_url)
                page += 1
                attempts = 0
            except Exception as e:
                print(e)
                attempts += 1
                if attempts >= max_retries:
                    # Give up on this page instead of retrying forever.
                    print('giving up on ' + page_url)
                    page += 1
                    attempts = 0
                else:
                    time.sleep(10)  # back off before retrying the same page


#http://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=100606&refer_flag=0000015010_&from=feed&loc=nickname&is_all=1&pagebar=0&pl_name=Pl_Official_MyProfileFeed__25&id=1006061904769205&script_uri=/zhihu&feed_type=0&page=1&pre_page=1&domain_op=100606&__rnd=1477474592263
#http://docs.python-requests.org/zh_CN/latest/user/quickstart.html
def result(url2='http://m.weibo.cn/page/json?containerid=1005052634877355_-_WEIBO_SECOND_PROFILE_WEIBO&page=1'):
    """Fetch one page of a user's weibo timeline and append post stats to a text file.

    Downloads the JSON at `url2`, extracts per-post engagement counts and the
    HTML-stripped post text, and appends one '$'-separated line per post to
    './<screen_name><time1>.txt'.  Sleeps 3 seconds afterwards to throttle
    the crawl.  Network/JSON errors propagate to the caller (morelinks
    catches and retries).
    """
    # Short timeout so a stalled connection raises instead of hanging the crawl.
    resp = requests.get(url2, timeout=3.0001)
    # Raw string: '\<' / '\>' are invalid escape sequences (SyntaxWarning on
    # modern Python).  Used to strip HTML tags from the post text.
    tag_re = re.compile(r'<.*?>')
    jcont = resp.json()  # parsed response body as a dict
    cards = jcont['cards']
    card_group = cards[0]['card_group']  # list of posts on this page
    username = card_group[0]['mblog']['user']['screen_name']
    # NOTE: intentionally a fresh local, not the module-level `path` constant.
    out_path = './' + username + time1 + '.txt'
    print(out_path)
    print(len(card_group))
    with open(out_path, 'a+', encoding='utf-8') as text:
        for item in card_group:
            mblog = item['mblog']  # hoist the repeated deep lookup
            record = {
                'time': mblog['created_at'],
                # Total engagement: reposts + comments + likes.
                'sum': mblog['reposts_count'] + mblog['comments_count'] + mblog['attitudes_count'],
                'reposts_count': mblog['reposts_count'],
                'comments_count': mblog['comments_count'],
                'attitudes_count': mblog['attitudes_count'],
                'text2': tag_re.sub('', mblog['text']),  # post text minus HTML tags
                'USER': mblog['user']['screen_name'],
            }
            print(str(record))
            text.write(str(record['time'])+'$'+str(record['sum'])+'$'+str(record['reposts_count'])+'$'
                       +str(record['comments_count'])+'$'+str(record['attitudes_count'])
                       +'$'+str(record['text2'])+'\n')
    time.sleep(3)  # throttle between page fetches

#def


# Script entry point: crawl pages 1-8 for every configured account.
morelinks()
#result()

你可能感兴趣的:(微博采集)