Crawling my Weibo posts and generating a word cloud
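
The script below pulls posts from the m.weibo.cn container API page by page, strips the HTML from each post body with pyquery, writes the collected text to weibo2.txt, then segments the text with jieba and renders a word cloud image (myweibo.png) with the wordcloud library.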

import requests
from urllib.parse import urlencode
from pyquery import PyQuery as pq
#from pymongo import MongoClient
import jieba
import wordcloud



base_url = 'https://m.weibo.cn/api/container/getIndex?'
headers = {
    #'Host': 'm.weibo.cn',
    'Referer': 'https://m.weibo.cn/u/6390210715',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}
#client = MongoClient()
#db = client['weibo']
#collection = db['weibo']
max_page = 20


def get_page(page):
    # Fetch one page of the user's timeline from the m.weibo.cn container API.
    params = {
        'type': 'uid',
        'value': '6390210715',
        'containerid': '1076036390210715',
        'page': page
    }
    url = base_url + urlencode(params)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.json(), page
    except requests.ConnectionError as e:
        print('Error', e.args)
    return None  # request failed or returned a non-200 status

list1 = []  # collected post texts, written to disk later

def parse_page(json, page: int):
    if json:
        items = json.get('data', {}).get('cards', [])
        for index, item in enumerate(items):
            if page == 1 and index == 1:
                # skip the extra non-post card that appears on the first page
                continue
            item = item.get('mblog', {})
            if not item:
                continue
            weibo = {}
            #weibo['id'] = item.get('id')
            # strip the HTML markup from the post body with pyquery
            weibo['text'] = pq(item.get('text')).text()
            list1.append(weibo['text'])
            #weibo['attitudes'] = item.get('attitudes_count')
            #weibo['comments'] = item.get('comments_count')
            #weibo['reposts'] = item.get('reposts_count')
            yield weibo



# def save_to_mongo(result):
#     if collection.insert(result):
#         print('Saved to Mongo')


def writefile():
    # Write each collected post on its own line, overwriting any previous run.
    with open('weibo2.txt', mode='w', encoding='utf-8') as f:
        for item in list1:
            f.write('%s\n' % item)


def wordCloud():
    # Segment the collected text with jieba and render it as a word cloud image.
    with open('weibo2.txt', mode='r', encoding='utf-8') as m:
        t = m.read()
    ls = jieba.lcut(t)
    txt = " ".join(ls)
    # font_path must point to a font that can display Chinese (here: Microsoft YaHei)
    w = wordcloud.WordCloud(font_path="msyh.ttc", width=1000, height=700, max_words=25)
    w.generate(txt)
    w.to_file("myweibo.png")


if __name__ == '__main__':
    for page in range(1, max_page + 1):
        page_json = get_page(page)
        if not page_json:
            continue  # skip pages that failed to download
        results = parse_page(*page_json)
        for result in results:
            print(result)
    writefile()
    wordCloud()
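
To run the script, install the third-party packages it imports (for example: pip install requests pyquery jieba wordcloud) and make sure the font file msyh.ttc (Microsoft YaHei, or any other font that can render Chinese) is reachable from the working directory; without a CJK-capable font the Chinese words in the cloud typically render as empty boxes.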

(Figure 1: the generated word cloud image.)
