Model Training

The script below trains a ResNet-18 on CIFAR-100 with the Keras compile/fit API; an equivalent tf.data input pipeline and a hand-written training loop are kept in the comments for reference.

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os
from resnet import resnet18


os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow INFO/WARNING logs
tf.random.set_seed(2345)                  # fix the random seed for reproducibility

# def preprocess(x,y):
#     # [0~1]
#     x = tf.cast(x, dtype=tf.float32) / 255.
#     y = tf.cast(y, dtype=tf.int32)
#     return x, y



# CIFAR-100: x is (50000, 32, 32, 3) uint8 images, y is (50000, 1) integer labels in [0, 99]
(x, y), (x_test, y_test) = datasets.cifar100.load_data()

# cast to float32 and scale pixel values to [0, 1]
x = x.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

# y = tf.squeeze(y,axis=1)
# y_test= tf.squeeze(y_test,axis=1)
# print (x.shape, y.shape, x_test.shape,y_test.shape)
#
# batchsz=64
# train_db = tf.data.Dataset.from_tensor_slices((x,y))
# train_db = train_db.shuffle(1000).map(preprocess).batch(batchsz)
#
# test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test))
# test_db = test_db.map(preprocess).batch(batchsz)
#
# sample = next(iter(train_db))
# print ('sample:',sample[0].shape, sample[1].shape,
#        tf.reduce_min(sample[0]),tf.reduce_max(sample[0]))
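The commented-out block above is an equivalent tf.data input pipeline. If it is enabled, the batched datasets can be passed to Keras directly instead of the raw NumPy arrays, replacing the fit/evaluate calls in main() below. A minimal sketch (validation_split is not supported for datasets, so a separate validation_data is used):

model.fit(train_db, epochs=1, validation_data=test_db)
model.evaluate(test_db)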





def main():
    # input images: [b, 32, 32, 3]
    model = resnet18()
    model.build(input_shape=(None, 32, 32, 3))
    # optimizer = optimizers.Adam(learning_rate=1e-3)  # for the manual training loop below

    # resnet18 outputs raw logits (see the manual loop below),
    # so the sparse cross-entropy is computed from logits
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=optimizers.Adam(learning_rate=1e-3),
                  metrics=['acc'])
    # shuffle expects a boolean when training from NumPy arrays
    model.fit(x, y, epochs=1, verbose=1, validation_split=0.3, shuffle=True)
    model.evaluate(x_test, y_test, verbose=2)
    # a subclassed model cannot be saved as a single HDF5 file with model.save,
    # so only the weights are stored
    model.save_weights('my_model.h5')


    # for epoch in range(50):
    #
    #     for step, (x,y) in enumerate(train_db):
    #
    #         with tf.GradientTape() as tape:
    #             logits = model(x)
    #             y_onehot = tf.one_hot(y, depth=100)
    #
    #             loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits =True)
    #             loss = tf.reduce_mean(loss)
    #
    #         grads = tape.gradient(loss, model.trainable_variables)
    #         optimizer.apply_gradients(zip(grads, model.trainable_variables))
    #
    #         if step %10 ==0:
    #             print ('epoch:',epoch,'step:',step, 'loss:',float(loss))
    #
    #
    #     total_num = 0
    #     total_correct=0
    #
    #     for x,y in test_db:
    #
    #         logits = model(x)
    #         prob = tf.nn.softmax(logits, axis=1)
    #         pred = tf.argmax(prob,axis=1)
    #         pred = tf.cast(pred, dtype=tf.int32)
    #
    #         correct = tf.cast(tf.equal(pred, y),dtype=tf.int32)
    #         correct = tf.reduce_sum(correct)
    #
    #         total_num += x.shape[0]
    #         total_correct += int(correct)
    #
    #     acc = total_correct / total_num
    #     print(epoch,'acc:',acc)








if __name__ == '__main__':
    main()
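To reuse the trained network later, rebuild the architecture and load the saved weights back in. A minimal sketch, assuming the same resnet18 from resnet.py and the my_model.h5 weights file written by the run above:

import tensorflow as tf
from tensorflow.keras import datasets
from resnet import resnet18

# rebuild the same architecture and restore the trained weights
model = resnet18()
model.build(input_shape=(None, 32, 32, 3))
model.load_weights('my_model.h5')

# classify a few CIFAR-100 test images (same [0, 1] scaling as during training)
(_, _), (x_test, y_test) = datasets.cifar100.load_data()
images = tf.cast(x_test[:8], tf.float32) / 255.
logits = model(images)                 # raw logits, shape [8, 100]
pred = tf.argmax(logits, axis=1)       # predicted class indices
print(pred.numpy(), y_test[:8].squeeze())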
