The rumor identification system is a follow-up to my news classification system. This time I added a dataset of legitimate ("true") news; to make sure those articles are genuinely reliable, I crawled them from ThePaper.cn (澎湃新闻).
For how the rumor dataset was crawled and processed, see the dataset section of my news classification post at the link below:
http://blog.csdn.net/sileixinhua/article/details/74943336
All datasets, code, and result screenshots have been uploaded to GitHub:
https://github.com/sileixinhua/News-classification/
The rumor dataset (class "false") contains 3183 articles.
The non-rumor dataset (class "true") contains 1674 articles.
The experiment reports 99% accuracy, which I think is too high and points to overfitting: the rumor articles are mostly lifestyle and health topics, while all the non-rumor articles come from The Paper, so the two classes use almost completely different vocabularies and naive Bayes separates them very easily.
Development environment and tools:
Beautiful Soup 4.4.0 documentation: http://beautifulsoup.readthedocs.io/zh_CN/latest/#id28
Requests: http://cn.python-requests.org/zh_CN/latest/
Python 3
scikit-learn: http://scikit-learn.org/stable/
Windows 10
Sublime Text
jieba (Chinese word segmentation)
All of the article text sits inside tags whose class name is news_txt, so extraction is straightforward: select those tags by class, for example
soup_text.find_all(class_="news_txt")
and take the text of the matched nodes.
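As a quick illustration, here is a minimal standalone sketch of that extraction step (the article ID is simply the first one in the crawled range, and the news_txt class is the page structure assumed above):

import requests
from bs4 import BeautifulSoup

# Fetch one article page and keep only the text inside the news_txt tags.
page = requests.get('http://www.thepaper.cn/newsDetail_forward_1701736')
soup = BeautifulSoup(page.content, 'lxml')
body = ''.join(node.get_text() for node in soup.find_all(class_='news_txt'))
print(body[:200])  # first 200 characters of the article body

The full crawler used to build the non-rumor dataset follows.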
# -*- coding: utf-8 -*-
# 2017-07-13 15:27:02
# silei
# Crawl target: http://www.thepaper.cn/newsDetail_forward_
# Fetch pages with requests + BeautifulSoup
# Crawl the non-rumor ("true") news, segment with jieba, remove stop words
from bs4 import BeautifulSoup
import codecs
import jieba
import requests

if __name__ == "__main__":
    text_file_number = 0
    web_url_number = 1701736
    while web_url_number < 1731414:
        get_url = 'http://www.thepaper.cn/newsDetail_forward_' + str(web_url_number)
        # Pretend to be a browser by sending a custom User-Agent header
        head = {'User-Agent': 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19'}
        r = requests.get(get_url, headers=head)
        print(get_url)
        print(r.status_code)
        web_url_number = web_url_number + 1
        soup_text = BeautifulSoup(r.content, 'lxml')
        # The article body sits in tags whose class name is news_txt
        content = ''.join(node.get_text() for node in soup_text.find_all(class_="news_txt"))
        if content.strip() == "":
            print('Empty article, skipping')
            continue
        # Load the stop word list
        stoplist = {}.fromkeys([line.strip() for line in open("../data/stopword.txt", encoding='UTF-8')])
        # jieba segmentation, accurate mode
        seg_list = jieba.lcut(content, cut_all=False)
        # Remove stop words
        seg_list = [word for word in seg_list if word not in stoplist]
        # Store the segmented article locally, one word per line
        file_write = codecs.open('../data/train_data_news/true/' + str(text_file_number) + '.txt', 'w', 'UTF-8')
        for word in seg_list:
            file_write.write(str(word) + '\n')
        file_write.close()
        print('Written successfully')
        text_file_number = text_file_number + 1
# coding: utf-8
# 2017-07-13 17:10:27
# silei
# Number of non-rumor ("true") news articles: 1674
import os
import time
import random
import jieba
import nltk
import sklearn
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import pylab as pl
import matplotlib.pyplot as plt
def MakeWordsSet(words_file):
    words_set = set()
    with open(words_file, 'r', encoding='UTF-8') as fp:
        for line in fp.readlines():
            word = line.strip()
            if len(word) > 0 and word not in words_set:  # deduplicate
                words_set.add(word)
    return words_set
def TextProcessing(folder_path, test_size=0.2):
    folder_list = os.listdir(folder_path)
    data_list = []
    class_list = []
    # loop over the classes (one sub-folder per class)
    for folder in folder_list:
        new_folder_path = os.path.join(folder_path, folder)
        files = os.listdir(new_folder_path)
        # loop over the files within one class
        j = 0
        for file in files:
            if j > 410:  # use at most 410 text samples per class
                break
            with open(os.path.join(new_folder_path, file), 'r', encoding='UTF-8') as fp:
                raw = fp.read()
            # print(raw)
            ## --------------------------------------------------------------------------------
            ## jieba word segmentation
            # jieba.enable_parallel(4)  # parallel mode; argument is the process count; not supported on Windows
            word_cut = jieba.cut(raw, cut_all=False)  # accurate mode; returns an iterable generator
            word_list = list(word_cut)  # convert the generator to a list of words
            # jieba.disable_parallel()  # turn parallel mode off again
            # print(word_list)
            ## --------------------------------------------------------------------------------
            data_list.append(word_list)
            class_list.append(folder)
            j += 1
    ## split into training and test sets
    # train_data_list, test_data_list, train_class_list, test_class_list = sklearn.model_selection.train_test_split(data_list, class_list, test_size=test_size)
    data_class_list = list(zip(data_list, class_list))
    random.shuffle(data_class_list)
    index = int(len(data_class_list) * test_size) + 1
    train_list = data_class_list[index:]
    test_list = data_class_list[:index]
    train_data_list, train_class_list = zip(*train_list)
    test_data_list, test_class_list = zip(*test_list)
    # count word frequencies into all_words_dict
    all_words_dict = {}
    for word_list in train_data_list:
        for word in word_list:
            if word in all_words_dict:
                all_words_dict[word] += 1
            else:
                all_words_dict[word] = 1
    # sort the (word, count) pairs by frequency in descending order
    all_words_tuple_list = sorted(all_words_dict.items(), key=lambda f: f[1], reverse=True)
    all_words_list = list(zip(*all_words_tuple_list))[0]
    return all_words_list, train_data_list, test_data_list, train_class_list, test_class_list
def words_dict(all_words_list, deleteN, stopwords_set=set()):
    # select feature words, skipping the deleteN most frequent ones
    feature_words = []
    n = 1
    for t in range(deleteN, len(all_words_list), 1):
        if n > 1000:  # cap feature_words at 1000 dimensions
            break
        # keep words that are not pure digits, not stop words, and 2-4 characters long
        if not all_words_list[t].isdigit() and all_words_list[t] not in stopwords_set and 1 < len(all_words_list[t]) < 5:
            feature_words.append(all_words_list[t])
            n += 1
    return feature_words
def TextFeatures(train_data_list, test_data_list, feature_words, flag='nltk'):
    def text_features(text, feature_words):
        text_words = set(text)
        ## -----------------------------------------------------------------------------------
        if flag == 'nltk':
            ## nltk features: a dict of word -> 0/1
            features = {word: 1 if word in text_words else 0 for word in feature_words}
        elif flag == 'sklearn':
            ## sklearn features: a 0/1 list
            features = [1 if word in text_words else 0 for word in feature_words]
        else:
            features = []
        ## -----------------------------------------------------------------------------------
        return features
    train_feature_list = [text_features(text, feature_words) for text in train_data_list]
    test_feature_list = [text_features(text, feature_words) for text in test_data_list]
    return train_feature_list, test_feature_list
def TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list, flag='nltk'):
    ## -----------------------------------------------------------------------------------
    if flag == 'nltk':
        ## nltk classifier
        train_flist = zip(train_feature_list, train_class_list)
        test_flist = zip(test_feature_list, test_class_list)
        classifier = nltk.classify.NaiveBayesClassifier.train(train_flist)
        # print(classifier.classify_many(test_feature_list))
        test_accuracy = nltk.classify.accuracy(classifier, test_flist)
    elif flag == 'sklearn':
        ## sklearn classifier
        classifier = MultinomialNB().fit(train_feature_list, train_class_list)
        # print(classifier.predict(test_feature_list))
        test_accuracy = classifier.score(test_feature_list, test_class_list)
    else:
        test_accuracy = []
    return test_accuracy
if __name__ == '__main__':
    print("start")
    ## text preprocessing
    folder_path = 'C:\\Code\\uwasa\\data\\train_data_news'
    all_words_list, train_data_list, test_data_list, train_class_list, test_class_list = TextProcessing(folder_path, test_size=0.2)
    # build stopwords_set
    stopwords_file = 'C:\\Code\\uwasa\\data\\stopword.txt'
    stopwords_set = MakeWordsSet(stopwords_file)
    ## feature extraction and classification
    # flag = 'nltk'
    flag = 'sklearn'
    deleteNs = range(0, 1000, 20)
    test_accuracy_list = []
    for deleteN in deleteNs:
        # feature_words = words_dict(all_words_list, deleteN)
        feature_words = words_dict(all_words_list, deleteN, stopwords_set)
        train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words, flag)
        test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list, flag)
        test_accuracy_list.append(test_accuracy)
    print(test_accuracy_list)
    # plot test accuracy against deleteN
    plt.figure()
    plt.plot(deleteNs, test_accuracy_list)
    plt.title('Relationship of deleteNs and test_accuracy')
    plt.xlabel('deleteNs')
    plt.ylabel('test_accuracy')
    plt.savefig('result_rumor.png')
    print("finished")
The overfitting comes from the datasets themselves. My two classes are lifestyle and health rumors on one side and The Paper's news on the other; the gap between them is so large that the classification score ends up inflated. If you are interested, collect some additional news sources and rebuild the dataset.
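One way to check whether the classifier is really just separating the two sources is to inspect which words the fitted MultinomialNB model finds most indicative of each class. This is a minimal sketch, not part of the pipeline above; it assumes TextClassifier is adapted to also return the fitted sklearn model, and that feature_words is the word list behind the feature vectors:

import numpy as np

def most_indicative_words(clf, feature_words, k=20):
    # feature_log_prob_[i, j] is log P(word j | class i); the difference between
    # the two rows tells us which class each word points to most strongly.
    diff = clf.feature_log_prob_[0] - clf.feature_log_prob_[1]
    order = np.argsort(diff)
    print('strongest words for', clf.classes_[1], ':', [feature_words[i] for i in order[:k]])
    print('strongest words for', clf.classes_[0], ':', [feature_words[i] for i in order[-k:][::-1]])

If one list is dominated by health and lifestyle terms and the other by The Paper's current-affairs vocabulary, the 99% accuracy mostly measures the source gap rather than rumor detection.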
More than three months have passed since the semester started in April, with weekly meetings and research reports. I have worked through the whole of "Python Machine Learning", though I have not implemented all of its code yet; I plan to finish the PDF of the book while I am home, and then find some real data to work on when I am back.
I now follow quite a few accounts that summarize the latest papers. They do save time, but I still want to read the papers themselves so my English does not slip.
For the next stage, I plan to find time to learn Python network programming and the Go language.
Keep going!
——————————————————————————————————-
If you are learning machine learning, feel free to join the QQ group to chat and study together; the latest machine learning PDF books and other resources are shared there from time to time.
QQ group: 657119450