# https://stackoverflow.com/questions/70413900/how-to-get-conv2d-kernel-values-in-tensorflow-2
# How to get Conv2D kernel values in TensorFlow 2
"""
import tensorflow as tf
input_shape = (4, 28, 28, 3)
x = tf.random.normal(input_shape)
model = tf.keras.layers.Conv2D(2, 3, activation='relu', input_shape=input_shape[1:])
y = model(x)
print(model.kernel.conv1 )
print(model.kernel )"""
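# What the snippet above is reaching for (a minimal sketch, assuming TF 2.x): a Conv2D layer
# has no `kernel` variable until it is built by its first call; after that the weights can be
# read directly. `model.kernel.conv1` never exists, which is what the question is about.
# import tensorflow as tf
# layer = tf.keras.layers.Conv2D(2, 3, activation='relu')
# _ = layer(tf.random.normal((4, 28, 28, 3)))    # building call
# print(layer.kernel.shape)                      # (3, 3, 3, 2) = (kh, kw, in_channels, filters)
# print(layer.get_weights()[0].shape)            # same kernel as a NumPy array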
# TODO: build a special random-noise tensor (batch, w, h, channels) and filter it once with the
#       conv kernel; bright spots mark where the noise happens to match the kernel. Also take the
#       kernel itself, reshape it to (w, h) and plot it; see the plotting sketch at the end of
#       this script.
# TODO: model(images, training=True) ?? where is training=True defined?
#       (it is simply the `training` keyword argument of keras.Model.__call__ / the call() method)
# TODO: show the time spent per batch and print the kernel shape. Maybe Keras has no built-in
#       way to dump the kernel?? might need lower-level code
#       - via callbacks
#       - via TensorBoard (see the sketch just before the training loop)
# TODO: how to pack the dataset into a single file instead of a folder? (sketch just below)
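# One way to handle the "single file" TODO above (my own suggestion, not from the tutorial):
# bundle the MNIST arrays into one compressed .npz file with NumPy and reload it later.
# import numpy as np
# (x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
# np.savez_compressed('mnist.npz', x_train=x_tr, y_train=y_tr, x_test=x_te, y_test=y_te)
# data = np.load('mnist.npz')          # one file; access arrays as data['x_train'], etc.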
# from https://tensorflow.google.cn/tutorials/quickstart/advanced
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from matplotlib import pyplot as plt  # <<< plotting prep
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis].astype("float32")
x_test = x_test[..., tf.newaxis].astype("float32")
CNN_kernel = []  # <<< plotting prep (not used yet; kernels are read from model.conv1.kernel below)
train_ds = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(10000).batch(32)  # <<< batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)  # <<< batch(32)
class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(16, 3, activation='relu')  # <<< original tutorial uses 32 kernels
        self.flatten = Flatten()
        self.d1 = Dense(4, activation='relu')          # <<< original tutorial uses 128 dense units
        self.d2 = Dense(10)

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        # CNN_kernel = self.kernel  # fails: the kernel lives on self.conv1, not on the Model itself
        return self.d2(x)
# Create an instance of the model
model = MyModel()
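# The commented-out `self.kernel` in call() fails because the kernel belongs to the Conv2D
# sub-layer and only exists once the model has been built. Running one dummy batch through
# the model here (my addition, harmless before training) makes the kernel inspectable early.
_ = model(tf.zeros((1, 28, 28, 1), dtype=tf.float32))
print("conv1 kernel shape before training:", model.conv1.kernel.shape)  # (3, 3, 1, 16)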
'''
Shrinking the conv layer and the dense layer: accuracy reached in several runs
--------------------------------------------
def __init__(self):
    super(MyModel, self).__init__()
    self.conv1 = Conv2D(4, 3, activation='relu')   # <<< 4 conv kernels
    self.flatten = Flatten()
    self.d1 = Dense(32, activation='relu')         # <<< 32 dense units
    self.d2 = Dense(10)
Achievable accuracy:
Epoch 1, Loss: 0.23707012832164764, Accuracy: 93.20166778564453, Test Loss: 0.11074736714363098, Test Accuracy: 96.72000122070312
Epoch 2, Loss: 0.09355325251817703, Accuracy: 97.1500015258789, Test Loss: 0.07059688121080399, Test Accuracy: 97.66999816894531
Epoch 3, Loss: 0.06814359873533249, Accuracy: 97.97833251953125, Test Loss: 0.07042749226093292, Test Accuracy: 97.75
Epoch 4, Loss: 0.05377012863755226, Accuracy: 98.2933349609375, Test Loss: 0.07100935280323029, Test Accuracy: 97.70999908447266
Epoch 5, Loss: 0.04366404563188553, Accuracy: 98.62166595458984, Test Loss: 0.07366002351045609, Test Accuracy: 97.61000061035156
--------------------------------------------
def __init__(self):
    super(MyModel, self).__init__()
    self.conv1 = Conv2D(2, 3, activation='relu')   # <<< 2 conv kernels
    self.flatten = Flatten()
    self.d1 = Dense(16, activation='relu')         # <<< 16 dense units
    self.d2 = Dense(10)
Achievable accuracy:
Epoch 1, Loss: 0.4003552496433258, Accuracy: 88.25, Test Loss: 0.2517136037349701, Test Accuracy: 93.0
Epoch 2, Loss: 0.2139425277709961, Accuracy: 93.97166442871094, Test Loss: 0.1797635555267334, Test Accuracy: 95.02999877929688
Epoch 3, Loss: 0.16504262387752533, Accuracy: 95.2316665649414, Test Loss: 0.15187139809131622, Test Accuracy: 95.69000244140625
Epoch 4, Loss: 0.13611823320388794, Accuracy: 96.02000427246094, Test Loss: 0.13357087969779968, Test Accuracy: 96.13999938964844
Epoch 5, Loss: 0.11914531141519547, Accuracy: 96.44332885742188, Test Loss: 0.1219385638833046, Test Accuracy: 96.44000244140625
--------------------------------------------
def __init__(self):
    super(MyModel, self).__init__()
    self.conv1 = Conv2D(16, 3, activation='relu')  # <<< 16 conv kernels
    self.flatten = Flatten()
    self.d1 = Dense(16, activation='relu')         # <<< 16 dense units
    self.d2 = Dense(10)
TensorFlow version: 2.3.0
Epoch 1, Loss: 0.2446216195821762, Accuracy: 92.91166687011719, Test Loss: 0.1149502843618393, Test Accuracy: 96.36000061035156
Epoch 2, Loss: 0.09467839449644089, Accuracy: 97.23332977294922, Test Loss: 0.07694929093122482, Test Accuracy: 97.63999938964844
Epoch 3, Loss: 0.06800872832536697, Accuracy: 97.96333312988281, Test Loss: 0.07287246733903885, Test Accuracy: 97.53999328613281
Epoch 4, Loss: 0.05138058960437775, Accuracy: 98.46833038330078, Test Loss: 0.06521112471818924, Test Accuracy: 97.89999389648438
Epoch 5, Loss: 0.03978590667247772, Accuracy: 98.7316665649414, Test Loss: 0.06942927837371826, Test Accuracy: 97.80999755859375
--------------------------------------------
def __init__(self):
    super(MyModel, self).__init__()
    self.conv1 = Conv2D(8, 3, activation='relu')   # <<< 8 conv kernels
    self.flatten = Flatten()
    self.d1 = Dense(8, activation='relu')          # <<< 8 dense units
    self.d2 = Dense(10)
Epoch 1, Loss: 0.3922784924507141, Accuracy: 88.51167297363281, Test Loss: 0.20396079123020172, Test Accuracy: 94.19999694824219
Epoch 2, Loss: 0.15926599502563477, Accuracy: 95.5, Test Loss: 0.12827329337596893, Test Accuracy: 96.30000305175781
Epoch 3, Loss: 0.11262572556734085, Accuracy: 96.74500274658203, Test Loss: 0.10609399527311325, Test Accuracy: 96.88999938964844
Epoch 4, Loss: 0.09163926541805267, Accuracy: 97.27333068847656, Test Loss: 0.09585564583539963, Test Accuracy: 97.1199951171875
Epoch 5, Loss: 0.07721196115016937, Accuracy: 97.76333618164062, Test Loss: 0.09701179713010788, Test Accuracy: 97.19000244140625
--------------------------------------------
def __init__(self):
    super(MyModel, self).__init__()
    self.conv1 = Conv2D(16, 3, activation='relu')  # <<< 16 conv kernels
    self.flatten = Flatten()
    self.d1 = Dense(4, activation='relu')          # <<< 4 dense units
    self.d2 = Dense(10)
Epoch 1, Loss: 0.8463388085365295, Accuracy: 72.0183334350586, Test Loss: 0.49747708439826965, Test Accuracy: 86.02999877929688
Epoch 2, Loss: 0.4489363729953766, Accuracy: 87.56333923339844, Test Loss: 0.4068147838115692, Test Accuracy: 88.87000274658203
Epoch 3, Loss: 0.38576701283454895, Accuracy: 89.4000015258789, Test Loss: 0.3815392553806305, Test Accuracy: 89.7800064086914
Epoch 4, Loss: 0.34514060616493225, Accuracy: 90.56166076660156, Test Loss: 0.343354195356369, Test Accuracy: 90.90999603271484
Epoch 5, Loss: 0.3147679567337036, Accuracy: 91.45833587646484, Test Loss: 0.31742098927497864, Test Accuracy: 91.75
Epoch 6, Loss: 0.2888678312301636, Accuracy: 92.05999755859375, Test Loss: 0.29518914222717285, Test Accuracy: 92.40999603271484
Epoch 7, Loss: 0.2697764039039612, Accuracy: 92.64666748046875, Test Loss: 0.291316956281662, Test Accuracy: 92.18000030517578
Epoch 8, Loss: 0.2539854943752289, Accuracy: 93.01666259765625, Test Loss: 0.27717989683151245, Test Accuracy: 92.73999786376953
Epoch 9, Loss: 0.24011439085006714, Accuracy: 93.46500396728516, Test Loss: 0.27109673619270325, Test Accuracy: 92.65999603271484
Epoch 10, Loss: 0.22615042328834534, Accuracy: 93.69833374023438, Test Loss: 0.26570791006088257, Test Accuracy: 92.73999786376953
Epoch 11, Loss: 0.21567215025424957, Accuracy: 93.98666381835938, Test Loss: 0.26256102323532104, Test Accuracy: 93.08999633789062
Epoch 12, Loss: 0.20541338622570038, Accuracy: 94.34500122070312, Test Loss: 0.2572975754737854, Test Accuracy: 93.11000061035156
Epoch 13, Loss: 0.19656269252300262, Accuracy: 94.538330078125, Test Loss: 0.250868022441864, Test Accuracy: 93.08000183105469
Epoch 14, Loss: 0.1892404407262802, Accuracy: 94.73333740234375, Test Loss: 0.26545971632003784, Test Accuracy: 93.02999877929688
Epoch 15, Loss: 0.18099074065685272, Accuracy: 95.00833129882812, Test Loss: 0.24252015352249146, Test Accuracy: 93.5999984741211
Epoch 16, Loss: 0.17358624935150146, Accuracy: 95.17499542236328, Test Loss: 0.23873189091682434, Test Accuracy: 93.37000274658203
Epoch 17, Loss: 0.16649213433265686, Accuracy: 95.29167175292969, Test Loss: 0.2428164929151535, Test Accuracy: 93.4000015258789
Epoch 18, Loss: 0.16083897650241852, Accuracy: 95.48332977294922, Test Loss: 0.2358662188053131, Test Accuracy: 93.87999725341797
Epoch 19, Loss: 0.1541549265384674, Accuracy: 95.62666320800781, Test Loss: 0.23503205180168152, Test Accuracy: 93.55999755859375
Epoch 20, Loss: 0.14946123957633972, Accuracy: 95.76000213623047, Test Loss: 0.2402830719947815, Test Accuracy: 93.68000030517578
--------------------------------------------
'''
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
# @tf.function  # left off here; eager execution makes it easier to print/debug inside the step
def train_step(images, labels):
    with tf.GradientTape() as tape:
        # training=True is only needed if there are layers with different
        # behavior during training versus inference (e.g. Dropout).
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
# @tf.function
def test_step(images, labels):
    # training=False is only needed if there are layers with different
    # behavior during training versus inference (e.g. Dropout).
    predictions = model(images, training=False)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
# print("self.conv1", CNN_kernal)
# print("model.kernel.shape=========", model.kernel.shape) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<,
EPOCHS = 1  # <<< number of epochs (the runs in the docstring above used 5 or 20; 1 keeps it quick)
for epoch in range(EPOCHS):
    # Reset the metrics at the start of the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
    for images, labels in train_ds:
        train_step(images, labels)
    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)
    print(
        f'Epoch {epoch + 1}, '
        f'Loss: {train_loss.result()}, '
        f'Accuracy: {train_accuracy.result() * 100}, '
        f'Test Loss: {test_loss.result()}, '
        f'Test Accuracy: {test_accuracy.result() * 100}'
    )
print("model.conv1.kernel=========", model.conv1.kernel)
print("model.conv1.kernel.shape=========", model.conv1.kernel.shape)
# How to read the convolution kernels from a TensorFlow 2 model: see model.conv1.kernel above.