tensorflow-chp06
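
A complete TensorFlow 2 example: a three-layer fully connected network classifies MNIST digits, trained with MSE loss on one-hot labels via a manual tf.GradientTape loop.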

#coding:utf-8
import tensorflow as tf

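# Preprocess one batch: scale pixels to [0,1], flatten each 28x28 image to a
# 784-dim vector, and one-hot encode the integer label over 10 classes.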
def pre_process(x,y):
    x = tf.cast(x,tf.float32)/255.
    x = tf.reshape(x,[-1,28*28])
    y = tf.cast(y,tf.int32)
    y = tf.one_hot(y, depth=10)
    return x,y

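# Build tf.data pipelines: shuffle and batch the training split, batch the
# test split, and apply the same preprocessing to both.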
def load_dataset():
    (x_train,y_train),(x_test,y_test) = tf.keras.datasets.mnist.load_data()
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test,y_test))
    train_dataset = train_dataset.shuffle(10000)
    train_dataset = train_dataset.batch(128)
    train_dataset = train_dataset.map(pre_process)
    # Batch the test set as well, so evaluation runs per batch rather than
    # one sample at a time and the labels y have shape [batch,10].
    test_dataset = test_dataset.batch(128).map(pre_process)
    return (x_train,y_train,x_test,y_test,train_dataset,test_dataset)

if __name__ == '__main__':
    x_train,y_train,x_test,y_test,train_dataset,test_dataset = load_dataset()
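    # Three-layer MLP; the last layer has no activation, so it outputs raw logits.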
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(256,activation=tf.nn.relu),
        tf.keras.layers.Dense(128,activation=tf.nn.relu),
        tf.keras.layers.Dense(10,activation=None)
    ])
    model.build(input_shape=[None,28*28])
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)

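    # Manual training loop: 20 epochs over the batched training pipeline.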
    for epoch in range(20):
        for step,(x,y) in enumerate(train_dataset):
            with tf.GradientTape() as tape:
                out = model(x)
                # Mean squared error between one-hot labels and raw logits,
                # averaged over the batch (this chapter uses MSE, not cross-entropy).
                loss = tf.reduce_mean(tf.keras.losses.MSE(y,out))
            # Log outside the tape context; nothing here needs to be recorded.
            if step%100 == 0:
                print("epoch:",epoch,"step:",step,"loss:",loss.numpy())
            grads = tape.gradient(loss,model.trainable_variables)
            optimizer.apply_gradients(zip(grads,model.trainable_variables))

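    # Evaluation: accuracy is the fraction of test samples whose argmax
    # prediction matches the true label.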
    total_correct = 0
    total = y_test.shape[0]
    for x,y in test_dataset:
        out_test = model(x)
        # Labels are one-hot, so take argmax along the class axis (axis=1)
        # for both predictions and labels.
        pred = tf.argmax(out_test,axis=1)
        y_true = tf.argmax(y,axis=1)
        correct = tf.equal(pred,y_true)
        total_correct += tf.reduce_sum(tf.cast(correct,dtype=tf.int32)).numpy()
    print("acc="+str(total_correct/total))

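For comparison, the same model can also be trained with the high-level Keras API instead of the manual loop above. A minimal sketch, reusing the pipelines from load_dataset(); note that it swaps the chapter's MSE for categorical cross-entropy, the more common loss for one-hot classification labels:

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
              # cross-entropy on raw logits; an alternative to the MSE used above
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_dataset,epochs=20)
model.evaluate(test_dataset)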