import re
import requests
import jieba
from bs4 import BeautifulSoup as bp
from nltk.classify import NaiveBayesClassifier
# Scrape the collected poems of Li Bai and Du Fu from gushiwen.org
urls = ['https://so.gushiwen.org/authors/authorvsw_b90660e3e492A{}.aspx',
        'https://so.gushiwen.org/authors/authorvsw_515ea88d1858A{}.aspx']
author = ['李白', '杜甫']
# Some sites reject requests' default User-Agent; a browser-like header is a
# common workaround (assumption: the original sent no headers at all).
headers = {'User-Agent': 'Mozilla/5.0'}
for index, url in enumerate(urls):
    longstr = ''
    for n in range(1, 21):  # pages 1-20 of this author's poem listing
        urlp = url.format(n)
        r = requests.get(urlp, headers=headers)
        bs1 = bp(r.text, 'lxml')
        contsons = bs1.select('div .contson')  # poem bodies live in .contson divs
        for contson in contsons:
            # strip whitespace and Chinese commas/full stops
            longstr += re.sub(r'\s|[,。]', '', contson.text)
    # write each author's corpus once, after all pages are collected
    with open('{}.txt'.format(author[index]), 'w', encoding='utf8') as f:
        f.write(longstr)
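# Quick sanity check (an illustrative addition, not in the original script):
# confirm both corpus files were written and are non-empty.
import os
for name in author:
    print('%s.txt: %d bytes' % (name, os.path.getsize('{}.txt'.format(name))))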
# Read the corpora back as UTF-8 text (not bytes) and segment with jieba
with open(r"李白.txt", encoding='utf8') as f:
    text1 = f.read()
list1 = jieba.cut(text1)
with open(r"杜甫.txt", encoding='utf8') as f:
    text2 = f.read()
list2 = jieba.cut(text2)
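# Note: jieba.cut returns a lazy generator, so list1 and list2 can each be
# iterated only once; the feature lists below consume them.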
# Data preparation
libai = list1
dufu = list2
# Feature extraction: map each token to a bag-of-features dict for NLTK
def word_feats(words):
    return dict([(word, True) for word in words])
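# Since iterating a Python string yields characters, calling word_feats on a
# segmented word such as '明月' produces {'明': True, '月': True}; the
# classifier therefore works on single-character features.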
libai_features = [(word_feats(lb), 'lb') for lb in libai]
dufu_features = [(word_feats(df), 'df') for df in dufu]
train_set = libai_features + dufu_features
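# Optional evaluation sketch (an addition, not part of the original script):
# shuffle so both labels land on each side of a 90/10 split, train a probe
# classifier on the larger slice, and score it on the held-out slice.
import random
import nltk.classify.util
random.shuffle(train_set)
cut = int(len(train_set) * 0.9)
probe = NaiveBayesClassifier.train(train_set[:cut])
print('held-out accuracy: %.2f' % nltk.classify.util.accuracy(probe, train_set[cut:]))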
# Train the Naive Bayes classifier
classifier = NaiveBayesClassifier.train(train_set)
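# Peek at the characters that best separate the two poets; this diagnostic
# call is part of NLTK's NaiveBayesClassifier API (added here, optional).
classifier.show_most_informative_features(10)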
# Classify a test sentence
sentence = input("Enter a line of poetry you like: ")
print("\n")
seg_list = list(jieba.cut(sentence))
# Tally how each segmented word is classified
lb = 0
df = 0
for word in seg_list:
    classResult = classifier.classify(word_feats(word))
    if classResult == 'lb':
        lb = lb + 1
    if classResult == 'df':
        df = df + 1
# Report the proportions
x = lb / len(seg_list)
y = df / len(seg_list)
print('Likelihood of Li Bai: %.2f%%' % (x * 100))
print('Likelihood of Du Fu: %.2f%%' % (y * 100))
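# Every segmented word is assigned to exactly one of the two labels, so the
# two percentages always sum to 100%.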