import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, KernelPCA  # only needed for the optional dimensionality-reduction step below
import matplotlib.pyplot as plt
dataset = pd.read_csv(' ')  # TODO: supply the dataset path (left blank in the original)
X = np.array(dataset.values[2:, :-1])  # load features; dimensionality is 1024
X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))  # min-max normalization to [0, 1]
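# Note (an addition, not in the original script): if any feature column is
# constant, max == min above and the division produces NaNs. A safer
# variant, assuming that edge case can occur in the data:
# rng = X.max(axis=0) - X.min(axis=0)
# X = (X - X.min(axis=0)) / np.where(rng == 0, 1, rng)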
# Optional dimensionality reduction (disabled in the original run):
# k = 45
# pca = KernelPCA(n_components=k)  # or: pca = PCA(n_components=k) for linear PCA
# newX = pca.fit_transform(X)      # project the 1024-dim features down to k dims
# print(pca.explained_variance_ratio_)  # per-component contribution (PCA only)
# print(newX.shape[1])
labels1 = dataset.values[2:, -1]
labels2 = pd.get_dummies(labels1)  # convert labels to one-hot format
labels = labels2.values            # convert DataFrame to numpy array
X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2)  # 80/20 train/test split
print(X.shape[0])  # number of samples (10578 in the original dataset)
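# Optional sanity check (an addition, not in the original script): inspect
# the class balance before training, since a heavy skew would make raw
# accuracy misleading.
# print(pd.Series(labels1).value_counts())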
def create_model(learning_rate, num_dense_layers=4, num_dense_nodes=256):
    """Build a fully connected classifier with the given hyper-parameters."""
    model = Sequential()
    model.add(Input(shape=(X.shape[1],)))
    model.add(Dense(64))
    # Add the hidden dense layers.
    for i in range(num_dense_layers):
        name = 'layer_dense_{0}'.format(i + 1)
        model.add(Dense(num_dense_nodes, activation='relu', name=name))
    # Softmax output for 6-class classification.
    model.add(Dense(6, activation='softmax', name='output'))
    # Compile with RMSprop and categorical cross-entropy so the model can be trained.
    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()  # print the model architecture
    return model
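# A hypothetical variant (not part of the original run): the commented-out
# hints in the original script suggest experimenting with the swish
# activation. tf.keras accepts a callable such as tf.nn.swish wherever a
# string like 'relu' is allowed, so a sketch of that variant looks like:
def create_model_swish(learning_rate, num_dense_layers=4, num_dense_nodes=256):
    model = Sequential()
    model.add(Input(shape=(X.shape[1],)))
    model.add(Dense(64))
    for i in range(num_dense_layers):
        # swish(x) = x * sigmoid(x), passed as a callable activation
        model.add(Dense(num_dense_nodes, activation=tf.nn.swish))
    model.add(Dense(6, activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model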
def fitness(learning_rate, num_dense_layers, num_dense_nodes, epochs=20):
    """Train and evaluate one model.

    Hyper-parameters:
    learning_rate: learning rate for the optimizer.
    num_dense_layers: number of hidden dense layers.
    num_dense_nodes: number of nodes in each dense layer.
    epochs: number of training epochs.
    """
    # Print the hyper-parameters.
    print('learning rate: {0:.1e}'.format(learning_rate))
    print('num_dense_layers:', num_dense_layers)
    print('num_dense_nodes:', num_dense_nodes)
    # Create the neural network with these hyper-parameters.
    model = create_model(learning_rate=learning_rate,
                         num_dense_layers=num_dense_layers,
                         num_dense_nodes=num_dense_nodes)
    # Use Keras to train the model.
    history = model.fit(x=X_train,
                        y=y_train,
                        epochs=epochs,
                        batch_size=64,
                        validation_data=(X_test, y_test),
                        callbacks=None)
    accuracy = history.history['val_accuracy'][-1]
    # Plot training and validation accuracy over the epochs.
    plt.plot(history.history['val_accuracy'], label='test')
    plt.plot(history.history['accuracy'], label='train')
    plt.title('bpnn', fontsize='12')
    plt.ylabel('accuracy', fontsize='20')
    plt.xlabel('epoch', fontsize='20')
    plt.legend()
    plt.show()
    print("Accuracy: {0:.2%}".format(accuracy))
    return history
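# Optional refinement (not in the original script): with a large epoch
# count it is common to stop once validation accuracy plateaus. A minimal
# sketch using Keras' built-in EarlyStopping callback; pass it as
# callbacks=[early_stop] in model.fit() inside fitness().
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
#                                               patience=10,
#                                               restore_best_weights=True)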
history = fitness(0.0005, 5, 256, epochs=100)  # set learning rate, number of hidden layers, and nodes per layer