Method 1: functional API
import numpy as np
import tensorflow as tf
mnist=tf.keras.datasets.mnist
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
(x_train, y_train), (x_test, y_test)=mnist.load_data()
x_train=x_train.reshape(60000,784).astype(np.float32)/255
x_test=x_test.reshape(10000,784).astype(np.float32)/255
# Reshape to (N, 784) and scale pixel values to [0, 1]
y_train=tf.keras.utils.to_categorical(y_train,10)
y_test=tf.keras.utils.to_categorical(y_test,10)  # convert labels to one-hot vectors
# Build the model with the functional API
inputs=tf.keras.layers.Input(shape=(784,),name="input")  # each sample is a flattened 784-pixel image
xx=tf.keras.layers.Dense(10,activation="relu",kernel_initializer="he_normal")(inputs)
bn=tf.keras.layers.BatchNormalization()(xx)
x=tf.keras.layers.Dense(15,activation="relu",kernel_initializer="he_normal")(bn)
out=tf.keras.layers.Dense(10,activation="softmax",kernel_initializer="he_normal")(x)
model=tf.keras.Model(inputs=inputs,outputs=out)
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["categorical_accuracy"])
history=model.fit(x_train,y_train,batch_size=10,epochs=5,verbose=1,validation_data=(x_test,y_test))
loss=history.history["loss"]
categorical_accuracy=history.history["categorical_accuracy"]
val_loss=history.history["val_loss"]
val_categorical_accuracy=history.history["val_categorical_accuracy"]
plt.figure()
plt.plot(range(len(loss)),loss,label="loss")
plt.plot(range(len(loss)),categorical_accuracy,label="categorical_accuracy")
plt.plot(range(len(loss)),val_loss,label="val_loss")
plt.plot(range(len(loss)),val_categorical_accuracy,label="val_categorical_accuracy")
plt.legend()
plt.show()
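After training, the model can be checked against the held-out test set. A minimal sketch using only the variables defined above (the slice of 5 samples is arbitrary):
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("test loss:", test_loss, "test accuracy:", test_acc)
probs = model.predict(x_test[:5])                    # softmax probabilities, shape (5, 10)
print("predicted digits:", np.argmax(probs, axis=1))
print("true digits:", np.argmax(y_test[:5], axis=1))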
Method 2: Sequential (layer list)
import numpy as np
import tensorflow as tf
mnist=tf.keras.datasets.mnist
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
(x_train, y_train), (x_test, y_test)=mnist.load_data()
x_train=x_train.reshape(60000,784).astype(np.float32)/255
x_test=x_test.reshape(10000,784).astype(np.float32)/255
# Reshape to (N, 784) and scale pixel values to [0, 1]
y_train=tf.keras.utils.to_categorical(y_train,10)
y_test=tf.keras.utils.to_categorical(y_test,10)
# Build the model as a Sequential stack of layers
model=tf.keras.Sequential([
tf.keras.layers.Dense(10,activation="relu",kernel_initializer="he_normal"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(15,activation="relu",kernel_initializer="he_normal"),
tf.keras.layers.Dense(10,activation="softmax",kernel_initializer="he_normal"),
])
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["categorical_accuracy"])
history=model.fit(x_train,y_train,batch_size=10,epochs=5,verbose=1,validation_data=(x_test,y_test))
loss=history.history["loss"]
categorical_accuracy=history.history["categorical_accuracy"]
val_loss=history.history["val_loss"]
val_categorical_accuracy=history.history["val_categorical_accuracy"]
plt.figure()
plt.plot(range(len(loss)),loss,label="loss")
plt.plot(range(len(loss)),categorical_accuracy,label="categorical_accuracy")
plt.plot(range(len(loss)),val_loss,label="val_loss")
plt.plot(range(len(loss)),val_categorical_accuracy,label="val_categorical_accuracy")
plt.legend()
plt.show()
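Note that this Sequential model is declared without an Input layer, so its weights are only created the first time data passes through it (here, inside fit). To inspect the architecture independently of fit, the model can be built explicitly first; a minimal sketch:
model.build(input_shape=(None, 784))   # create weights for 784-dimensional inputs
model.summary()                        # layer shapes and parameter counts are now available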
Method 3: functional API
import numpy as np
import tensorflow as tf
mnist=tf.keras.datasets.mnist
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
(x_train, y_train), (x_test, y_test)=mnist.load_data()
x_train=x_train.reshape(60000,784).astype(np.float32)/255
x_test=x_test.reshape(10000,784).astype(np.float32)/255
# Reshape to (N, 784) and scale pixel values to [0, 1]
y_train=tf.keras.utils.to_categorical(y_train,10)
y_test=tf.keras.utils.to_categorical(y_test,10)  # convert labels to one-hot vectors
# Build the model with the functional API
inputs=tf.keras.layers.Input(shape=(784,),name="input")  # each sample is a flattened 784-pixel image
xx=tf.keras.layers.Dense(10,activation="relu",kernel_initializer="he_normal")(inputs)
bn=tf.keras.layers.BatchNormalization()(xx)
x=tf.keras.layers.Dense(15,activation="relu",kernel_initializer="he_normal")(bn)
out=tf.keras.layers.Dense(10,activation="softmax",kernel_initializer="he_normal")(x)
model=tf.keras.Model(inputs=inputs,outputs=out)
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["categorical_accuracy"])
history=model.fit(x_train,y_train,batch_size=10,epochs=5,verbose=1,validation_data=(x_test,y_test))
loss=history.history["loss"]
categorical_accuracy=history.history["categorical_accuracy"]
val_loss=history.history["val_loss"]
val_categorical_accuracy=history.history["val_categorical_accuracy"]
plt.figure()
plt.plot(range(len(loss)),loss,label="loss")
plt.plot(range(len(loss)),categorical_accuracy,label="categorical_accuracy")
plt.plot(range(len(loss)),val_loss,label="val_loss")
plt.plot(range(len(loss)),val_categorical_accuracy,label="val_categorical_accuracy")
plt.legend()
plt.show()
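The trained functional model can also be saved and reloaded. A minimal sketch, assuming a Keras version that supports the native .keras format (the file name is only an example; an .h5 path works the same way on older versions):
model.save("mnist_functional.keras")
restored = tf.keras.models.load_model("mnist_functional.keras")
print(restored.evaluate(x_test, y_test, verbose=0))   # [loss, categorical_accuracy]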
Method 4: subclassing tf.keras.Model
import numpy as np
import tensorflow as tf
mnist=tf.keras.datasets.mnist
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
(x_train, y_train), (x_test, y_test)=mnist.load_data()
x_train=x_train.reshape(60000,784).astype(np.float32)/255
x_test=x_test.reshape(10000,784).astype(np.float32)/255
# Reshape to (N, 784) and scale pixel values to [0, 1]
y_train=tf.keras.utils.to_categorical(y_train,10)
y_test=tf.keras.utils.to_categorical(y_test,10)
# Build the model by subclassing tf.keras.Model
class NumberModel(tf.keras.Model):
    def __init__(self):
        super(NumberModel, self).__init__()
        self.layer1 = tf.keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal")
        self.layer2 = tf.keras.layers.Dense(15, activation="relu", kernel_initializer="he_normal")
        self.layer3 = tf.keras.layers.Dense(10, activation="softmax", kernel_initializer="he_normal")

    def call(self, inputs):
        x = self.layer1(inputs)
        x = self.layer2(x)
        out = self.layer3(x)
        return out
model=NumberModel()
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["categorical_accuracy"])
history=model.fit(x_train,y_train,batch_size=10,epochs=5,verbose=1,validation_data=(x_test,y_test))
loss=history.history["loss"]
categorical_accuracy=history.history["categorical_accuracy"]
val_loss=history.history["val_loss"]
val_categorical_accuracy=history.history["val_categorical_accuracy"]
plt.figure()
plt.plot(range(len(loss)),loss,label="loss")
plt.plot(range(len(loss)),categorical_accuracy,label="categorical_accuracy")
plt.plot(range(len(loss)),val_loss,label="val_loss")
plt.plot(range(len(loss)),val_categorical_accuracy,label="val_categorical_accuracy")
plt.legend()
plt.show()
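A subclassed model behaves like a callable once it has been built (fit above builds it). A quick sanity check, where the batch of 3 test images is arbitrary:
sample = x_test[:3]                      # three flattened test images
probs = model(sample)                    # forward pass through call()
print(probs.shape)                       # (3, 10) softmax outputs
print(np.argmax(probs.numpy(), axis=1))  # predicted digit per sample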
Method 5: nested Sequential models
import numpy as np
import tensorflow as tf
mnist=tf.keras.datasets.mnist
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
(x_train, y_train), (x_test, y_test)=mnist.load_data()
x_train=x_train.reshape(60000,784).astype(np.float32)/255
x_test=x_test.reshape(10000,784).astype(np.float32)/255
# Reshape to (N, 784) and scale pixel values to [0, 1]
y_train=tf.keras.utils.to_categorical(y_train,10)
y_test=tf.keras.utils.to_categorical(y_test,10)
# Build the model from a reusable Sequential block
def createModel():
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal", kernel_regularizer="l2"))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.Dense(15, activation="relu", kernel_initializer="he_normal"))
    model.add(tf.keras.layers.BatchNormalization())
    return model
model=tf.keras.Sequential()
model.add(createModel())
model.add(tf.keras.layers.Dense(10, activation="softmax", kernel_initializer="he_normal"))
model.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["categorical_accuracy"])
history=model.fit(x_train,y_train,batch_size=10,epochs=5,verbose=1,validation_data=(x_test,y_test))
loss=history.history["loss"]
categorical_accuracy=history.history["categorical_accuracy"]
val_loss=history.history["val_loss"]
val_categorical_accuracy=history.history["val_categorical_accuracy"]
plt.figure()
plt.plot(range(len(loss)),loss,label="loss")
plt.plot(range(len(loss)),categorical_accuracy,label="categorical_accuracy")
plt.plot(range(len(loss)),val_loss,label="val_loss")
plt.plot(range(len(loss)),val_categorical_accuracy,label="val_categorical_accuracy")
plt.legend()
plt.show()
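In this nested setup the inner Sequential returned by createModel() is treated as a single layer of the outer model; a quick way to see that:
model.summary()              # the createModel() block shows up as one nested Sequential layer
print(len(model.layers))     # 2: the nested block plus the final softmax Dense layer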