News Topic Classification

from keras.datasets import reuters
#Load the Reuters newswire dataset (reuters = Reuters news agency)
(train_data,train_labels),(test_data,test_labels)=reuters.load_data(num_words=10000)
#Keep only the 10,000 most frequent words (ranked by frequency);
#this keeps the data small and easy to work with
print('Training samples (restricted to the top 10,000 words):',len(train_data))
print('Test samples (restricted to the top 10,000 words):',len(test_data))
print('First training sample:',train_data[0])
print('First training label:',train_labels[0])
#######################################################
word_index=reuters.get_word_index()
#Downloads reuters_word_index.json, which maps each word to its integer index
#print('word_index:',word_index)
#Invert the (word: index) mapping into an (index: word) dictionary
#(a list comprehension inside dict())
reverse_word_index=dict([(value,key) for (key,value) in word_index.items()])
#Indices 0, 1 and 2 are reserved for 'padding', 'start of sequence' and 'unknown',
#hence the i-3 offset; join with spaces so the decoded text is readable
decoded_newswire=' '.join([reverse_word_index.get(i-3,'?') for i in train_data[0]])
print('Decoded first training sample:',decoded_newswire)
#################################################
#Vectorize the input sequences with one-hot (multi-hot) encoding
import numpy as np
import _3_4_2独热编码  #local helper module from section 3.4.2
x_train=_3_4_2独热编码.vectorize_sequence(train_data)
#x_train=np.array(train_data)
x_test=_3_4_2独热编码.vectorize_sequence(test_data)
#x_test=np.array(test_data)
#import _3_5_2多分类结果标签数据的独热编码 as oneHot
#from _3_5_2多分类结果标签数据的独热编码 import *
#one_hot_train_labels= to_one_hot(train_labels)
#one_hot_test_labels=to_one_hot(test_labels)
#Keep the labels as plain integer arrays (paired with sparse_categorical_crossentropy below)
none_one_hot_train_labels=np.array(train_labels)
none_one_hot_test_labels=np.array(test_labels)
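#For reference: _3_4_2独热编码.vectorize_sequence presumably multi-hot encodes
#each sequence along the lines of the sketch below (an assumption based on the
#book's version; the actual local module may differ):
def vectorize_sequence_sketch(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))  #shape: (samples, 10000)
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.0  #set the index of every word in the sample to 1
    return results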

#A more complex problem calls for layers with more units (higher-dimensional representations)
from keras import models
from keras import layers,activations,optimizers,losses,metrics
#
model=models.Sequential()
model.add(layers.Dense(64,activation='relu',input_shape=(10000,)))
#64 units; each input sample is a 10,000-dimensional vector
model.add(layers.Dense(64,activation='relu'))
#another 64-unit hidden layer
model.add(layers.Dense(46,activation=activations.softmax))
#softmax output: a probability distribution over the 46 topic classes
model.compile(optimizer='rmsprop',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
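#Note: sparse_categorical_crossentropy works directly on integer labels.
#Equivalently, the labels could be one-hot encoded (as in the commented-out
#to_one_hot lines above) and the model compiled with categorical_crossentropy;
#the two formulations are mathematically the same. A sketch of that variant
#(hypothetical, using keras.utils.to_categorical):
#from keras.utils import to_categorical
#one_hot_train_labels=to_categorical(train_labels)
#model.compile(optimizer='rmsprop',
#              loss='categorical_crossentropy',
#              metrics=['accuracy'])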
########################################################
#Set aside the first 1,000 training samples as a validation set; train on the rest
x_val=x_train[:1000]
partial_x_train=x_train[1000:]

y_val=none_one_hot_train_labels[:1000]
partial_y_train=none_one_hot_train_labels[1000:]

history=model.fit(partial_x_train,partial_y_train,
                  epochs=9,batch_size=512,
                  validation_data=(x_val,y_val))
result=model.evaluate(x_test,none_one_hot_test_labels)
print("result:",result)#0.98,0.78
#####################################################
#Plot the training/validation loss and accuracy curves
import matplotlib.pyplot as plt
loss=history.history['loss']
val_loss=history.history['val_loss']

epochs=range(1,len(loss)+1)

plt.plot(epochs,loss,'bo',label='Training loss')
plt.plot(epochs,val_loss,'b',label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()  #Clear the current figure before plotting accuracy
#History keys are 'acc'/'val_acc' on older Keras; on newer versions
#(tf.keras >= 2.x) they are 'accuracy'/'val_accuracy'
acc=history.history['acc']
val_acc=history.history['val_acc']

plt.plot(epochs,acc,'bo',label='Training acc')
plt.plot(epochs,val_acc,'b',label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
#######################################################
#print('Accuracy of a completely random classifier (a baseline; not very useful)-----')
#import copy
#test_labels_copy=copy.copy(test_labels)
#np.random.shuffle(test_labels_copy)
#hits_array=np.array(test_labels)==np.array(test_labels_copy)
#print('hits_array:',hits_array)
#r=float(np.sum(hits_array)/len(test_labels))
#print('Result:',r)
#############Predictions on the test set###############################
#predictions=model.predict(x_test)
#print('predictions[0]:',predictions[0])
#print('np.sum(predictions[0])',np.sum(predictions[0]))
#print('np.argmax(predictions[0])',np.argmax(predictions[0]))
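#Hypothetical follow-up: argmax over the softmax outputs gives the predicted
#class index for every test sample at once:
#predicted_classes=np.argmax(predictions,axis=1)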

 
