This post uses BeautifulSoup to build a Baidu Tieba crawler that walks every page of every thread in the forum's recommended list, extracts the book titles (text enclosed in 《 》) mentioned on each page, and writes the per-title counts to data.csv.
import urllib.request
from bs4 import BeautifulSoup
import re
word_dict = {}  # maps each book title to the number of times it is mentioned
def page_analyse(content):
    # Count every book title (text wrapped in 《 》) that appears on one page.
    p = re.compile(r'《.*?》')
    words = p.findall(content.decode('utf8', 'ignore'))  # list of all book titles found on this page
    for w in words:
        if w not in word_dict:
            word_dict[w] = 1
        else:
            word_dict[w] = word_dict[w] + 1
def page_traverse(link):
    # Fetch every page of one thread and pass each page to page_analyse.
    content = urllib.request.urlopen(link).read()
    page_analyse(content)
    soup = BeautifulSoup(content, 'html.parser')
    items = soup.select("span.red")
    item = items[1]  # the second span.red holds the thread's page count
    num_page = int(list(filter(lambda x: x.isdigit(), item))[0])  # total number of pages in the thread
    for i in range(2, num_page + 1):  # page 1 was fetched above; ?pn=i selects page i
        content = urllib.request.urlopen(link + '?pn=' + str(i)).read()
        page_analyse(content)
for pn in range(0, 151, 50):  # male-oriented recommendations tab; listing pages at pn = 0, 50, 100, 150
    content = urllib.request.urlopen('http://tieba.baidu.com/f?kw=%E4%B9%A6%E8%8D%92&ie=utf-8&tab=good&cid=7&pn=' + str(pn)).read()
    soup = BeautifulSoup(content, 'html.parser')
    items = soup.select("a.j_th_tit")
    for item in items:
        link = 'http://tieba.baidu.com' + item['href']  # absolute link to each thread
        page_traverse(link)
f = open('data.csv', 'w', encoding='utf-8')
for key, value in word_dict.items():
    f.write('"' + key + '",' + str(value) + "\n")
f.close()
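
As a quick sanity check on the output, the sketch below (assuming data.csv has the one-title-per-line format written above) sorts the counts and prints the ten most frequently mentioned book titles:

import csv

# Load the title/count pairs written by the crawler above.
with open('data.csv', 'r', encoding='utf-8') as f:
    rows = [(title, int(count)) for title, count in csv.reader(f)]

# Print the ten most frequently mentioned book titles.
for title, count in sorted(rows, key=lambda r: r[1], reverse=True)[:10]:
    print(title, count)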