# Fully-connected network
# Handwritten digit dataset (MNIST)
import numpy as np
np.random.seed(1337)
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Activation,Convolution2D,MaxPooling2D,Flatten
from keras.optimizers import RMSprop
from keras.optimizers import Adagrad
from keras.optimizers import Adam
# Load MNIST: 60k training / 10k test grayscale digit images (28x28).
(X_train,y_train),(X_test,y_test)=mnist.load_data()
#Fully-connected network
print(X_train.shape)
# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
X_train=X_train.reshape(X_train.shape[0],-1)/255
print(X_train.shape,'\n')
print(X_test.shape)
X_test=X_test.reshape(X_test.shape[0],-1)/255
print(X_test.shape,'\n')
print(y_train.shape)
# One-hot encode the integer labels into 10-class vectors.
y_train=np_utils.to_categorical(y_train,num_classes=10)
print(y_train.shape,'\n')
print(y_test.shape)
y_test=np_utils.to_categorical(y_test,num_classes=10)
print(y_test.shape,'\n')
# Two-layer MLP: 784 -> 32 (ReLU) -> 10 (softmax class probabilities).
model=Sequential([
Dense(32,input_dim=784),
Activation('relu'),
Dense(10),
Activation('softmax')
])
# rmsprop=RMSprop(lr=0.001,rho=0.9,epsilon=1e-08,decay=0.0)
# NOTE(review): epsilon=0.5 is unusually large for Adagrad (typical ~1e-8) — confirm intentional.
adagrad=Adagrad(lr=0.1, epsilon=0.5, decay=0.0)
model.compile(
optimizer=adagrad,
loss='categorical_crossentropy',
metrics=['accuracy']
)
# Train for 6 epochs, then report loss/accuracy on the held-out test set.
model.fit(X_train,y_train,epochs=6,batch_size=32)
loss,acc=model.evaluate(X_test,y_test)
print('loss',loss)
print('acc',acc)
# CNN
# CNN layers
import numpy as np
np.random.seed(1337)
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Activation,Convolution2D,MaxPooling2D,Flatten
from keras.optimizers import RMSprop
from keras.optimizers import Adagrad
from keras.optimizers import Adam
# Load MNIST and reshape to channels-last (samples, 28, 28, 1) for the conv layers.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
img_x, img_y = 28, 28
x_train = x_train.reshape(x_train.shape[0],img_x, img_y, 1)
x_test = x_test.reshape(x_test.shape[0], img_x, img_y, 1)
# Cast to float32 and scale pixel values to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# One-hot encode the labels into 10-class vectors.
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# 5. Define the model structure: two conv+maxpool stages, then a dense classifier head.
model = Sequential()
model.add(Convolution2D(32, kernel_size=(5,5), activation='relu', input_shape=(img_x, img_y, 1)))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Convolution2D(64, kernel_size=(5,5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 6. Compile with Adam and track accuracy during training.
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# 7. Train for 10 epochs, then evaluate on the test set.
model.fit(x_train, y_train, batch_size=128, epochs=10)
loss,acc=model.evaluate(x_test,y_test)
print('loss',loss)
print('acc',acc)
# RNN
# SimpleRNN layer
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import Callback
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from keras.optimizers import Adam
class LossHistory(Callback):
    """Keras callback that records the training loss after every batch.

    After fit() completes, ``self.losses`` holds one float per processed batch.
    """

    def on_train_begin(self, logs=None):
        # Reset the trace so repeated fit() calls start from an empty list.
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        # logs is supplied by Keras per batch; 'loss' is that batch's training loss.
        # Use a None sentinel instead of a mutable {} default (shared-state pitfall).
        logs = logs or {}
        self.losses.append(logs.get('loss'))
# Data length: one image row has 28 pixels (features per time step).
input_size = 28
# Sequence length: 28 rows per image, consumed as 28 time steps.
time_steps = 28
# Number of hidden units (blocks) in the recurrent layer.
cell_size = 200
# Load the MNIST data.
(x_train,y_train), (x_test,y_test) = mnist.load_data()
# (60000,28,28): each 28x28 image is fed as a 28-step sequence; scale pixels to [0, 1].
x_train = x_train/255.0
x_test = x_test/255.0
# Convert labels to one-hot format.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# Build the model.
model = Sequential()
# Recurrent layer: consumes (time_steps, input_size) sequences.
model.add(SimpleRNN(
units = cell_size, # output size
input_shape = (time_steps, input_size), # input shape
))
# Fully-connected head ending in a 10-way softmax output layer.
model.add(Dense(1000, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(10, activation='softmax'))
# Define the optimizer.
adam = Adam(lr=1e-4)
# Compile with the optimizer and loss function; track accuracy during training.
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
history = LossHistory()
# Train the model, recording per-batch losses via the callback.
model.fit(x_train, y_train, batch_size=100, epochs=2, callbacks=[history])
print(len(history.losses))
# Evaluate the model on the test set.
loss, accuracy = model.evaluate(x_test, y_test)
print('test loss', loss)
print('test accuracy', accuracy)