TensorFlow 2.x Callbacks

Callbacks

We can customize callbacks to
1) control printed output, e.g., print the keys of logs and loss values,
2) implement early stopping,
3) update the learning rate across epochs.

Code: refer to https://www.tensorflow.org/guide/keras/custom_callback

If you need a VPN to access the site: Psiphon (for Windows)
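
In short, a custom callback is a subclass of keras.callbacks.Callback that overrides whichever hook methods it needs (on_train_begin, on_epoch_end, on_train_batch_end, ...); Keras calls each hook at the matching point of fit()/evaluate()/predict() and passes a logs dict of current values. A minimal sketch (the class name here is ours, not from the guide):

from tensorflow import keras

class PrintLossCallback(keras.callbacks.Callback):
    # Hypothetical minimal callback: report the loss after every epoch.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print("Epoch {}: loss = {}".format(epoch, logs.get("loss")))

The full example from the guide follows.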


import tensorflow as tf
from tensorflow import keras

import numpy as np


# Define the Keras model to add callbacks to
def get_model():
    model = keras.Sequential()
    model.add(keras.layers.Dense(1, input_dim=784))
    model.compile(
        optimizer=keras.optimizers.RMSprop(learning_rate=0.1),
        loss="mean_squared_error",
        metrics=["mean_absolute_error"],
    )
    return model

def loaddata():
    # Load example MNIST data and pre-process it
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
    x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
    # Limit the data to 1000 samples
    x_train = x_train[:1000]
    y_train = y_train[:1000]
    x_test = x_test[:1000]
    y_test = y_test[:1000]
    return x_train, y_train, x_test, y_test



class CustomCallback(keras.callbacks.Callback):
    """
    Print the log keys passed to each callback hook.
    """
    def on_train_begin(self, logs=None):
        keys = list(logs.keys())
        print("Starting training; got log keys: {}".format(keys))
    def on_train_end(self, logs=None):
        keys = list(logs.keys())
        print("Stop training; got log keys: {}".format(keys))

    def on_epoch_begin(self, epoch, logs=None):
        keys = list(logs.keys())
        print("Start epoch {} of training; got log keys: {}".format(epoch, keys))
    def on_epoch_end(self, epoch, logs=None):
        keys = list(logs.keys())
        print("End epoch {} of training; got log keys: {}".format(epoch, keys))

    def on_test_begin(self, logs=None):
        keys = list(logs.keys())
        print("Start testing; got log keys: {}".format(keys))
    def on_test_end(self, logs=None):
        keys = list(logs.keys())
        print("Stop testing; got log keys: {}".format(keys))
    def on_predict_begin(self, logs=None):
        keys = list(logs.keys())
        print("Start predicting; got log keys: {}".format(keys))
    def on_predict_end(self, logs=None):
        keys = list(logs.keys())
        print("Stop predicting; got log keys: {}".format(keys))

    def on_train_batch_begin(self, batch, logs=None):
        keys = list(logs.keys())
        print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
    def on_train_batch_end(self, batch, logs=None):
        keys = list(logs.keys())
        print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
    def on_test_batch_begin(self, batch, logs=None):
        keys = list(logs.keys())
        print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
    def on_test_batch_end(self, batch, logs=None):
        keys = list(logs.keys())
        print("...Evaluating: end of batch {}; got log keys: {}".format(batch, keys))
    def on_predict_batch_begin(self, batch, logs=None):
        keys = list(logs.keys())
        print("...Predicting: start of batch {}; got log keys: {}".format(batch, keys))
    def on_predict_batch_end(self, batch, logs=None):
        keys = list(logs.keys())
        print("...Predicting: end of batch {}; got log keys: {}".format(batch, keys))

class LossAndErrorPrintingCallback(keras.callbacks.Callback):
    """
    Print loss and error at the begining/end of each batch/epoch
    """
    def on_train_batch_end(self, batch, logs=None):
        print("For batch {}, loss is {:7.2f}.".format(batch, logs["loss"]))
    def on_test_batch_end(self, batch, logs=None):
        print("For batch {}, loss is {:7.2f}.".format(batch, logs["loss"]))

    def on_epoch_end(self, epoch, logs=None):
        print(
            "The average loss for epoch {} is {:7.2f} "
            "and mean absolute error is {:7.2f}.".format(
                epoch, logs["loss"], logs["mean_absolute_error"]
            )
        )

class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
    """Stop training when the loss is at its min, i.e. the loss stops decreasing.
    Arguments:
    patience: Number of epochs to wait after the minimum has been hit. After
    this number of epochs with no improvement, training stops.
    """
    def __init__(self, patience=20):
        super(EarlyStoppingAtMinLoss, self).__init__()
        self.patience = patience
        # best_weights to store the weights at which the minimum loss occurs.
        self.best_weights = None
    def on_train_begin(self, logs=None):
        # The number of epochs waited while the loss is no longer the minimum.
        self.wait = 0
        # The epoch the training stops at.
        self.stopped_epoch = 0
        # Initialize the best as infinity.
        self.best = np.inf
    def on_epoch_end(self, epoch, logs=None):
        current = logs.get("loss")
        if np.less(current, self.best):
            self.best = current
            self.wait = 0
            # Record the best weights if the current result is better (lower).
            self.best_weights = self.model.get_weights()
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.model.stop_training = True
                print("Restoring model weights from the end of the best epoch.")
                self.model.set_weights(self.best_weights)
    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0:
            print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))


class CustomLearningRateScheduler(keras.callbacks.Callback):
    """Learning rate scheduler which sets the learning rate according to schedule.
    Arguments:
    schedule: a function that takes an epoch index
    (integer, indexed from 0) and current learning rate
    as inputs and returns a new learning rate as output (float).
    """
    def __init__(self, schedule):
        super(CustomLearningRateScheduler, self).__init__()
        self.schedule = schedule
    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, "learning_rate"):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        # Get the current learning rate from model's optimizer.
        lr = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))
        # Call schedule function to get the scheduled learning rate.
        scheduled_lr = self.schedule(epoch, lr)
        # Set the value back to the optimizer before this epoch starts
        tf.keras.backend.set_value(self.model.optimizer.learning_rate, scheduled_lr)
        print("\nEpoch %05d: Learning rate is %6.4f." % (epoch, scheduled_lr))

LR_SCHEDULE = [
    # (epoch to start, learning rate) tuples
    (3, 0.05),
    (6, 0.01),
    (9, 0.005),
    (12, 0.001),
]

def lr_schedule(epoch, lr):
    """Helper function to retrieve the scheduled learning rate based on epoch."""
    if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]:
        return lr
    for i in range(len(LR_SCHEDULE)):
        if epoch == LR_SCHEDULE[i][0]:
            return LR_SCHEDULE[i][1]
    return lr
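
As a sanity check (our own asserts, not from the guide), the helper keeps the incoming rate between boundaries and switches exactly at the listed epochs. Keras also offers a built-in keras.callbacks.LearningRateScheduler that accepts the same (epoch, lr) -> new_lr function:

assert lr_schedule(0, 0.1) == 0.1       # before the first boundary: keep lr
assert lr_schedule(3, 0.1) == 0.05      # boundary epoch: switch to 0.05
assert lr_schedule(4, 0.05) == 0.05     # between boundaries: keep lr
assert lr_schedule(13, 0.001) == 0.001  # past the last boundary: keep lr

# Built-in equivalent of CustomLearningRateScheduler:
lr_callback = keras.callbacks.LearningRateScheduler(lr_schedule, verbose=1)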

def test():
    model = get_model()
    x_train, y_train, x_test, y_test = loaddata()

    #############################
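    # Example 1 (commented out): CustomCallback -- show which hooks fire and
    # which log keys they receive during fit/evaluate/predict.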
    # model.fit(
    #     x_train,
    #     y_train,
    #     batch_size=128,
    #     epochs=1,
    #     verbose=0,
    #     validation_split=0.5,
    #     callbacks=[CustomCallback()],
    # )
    # res = model.evaluate(
    #     x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()]
    # )
    # res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])


    #############################
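    # Example 2 (commented out): print loss per batch and metrics per epoch.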
    # model.fit(
    #     x_train,
    #     y_train,
    #     batch_size=128,
    #     epochs=2,
    #     verbose=0,
    #     callbacks=[LossAndErrorPrintingCallback()],
    # )

    #############################
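    # Example 3 (commented out): loss printing plus custom early stopping.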
    # model.fit(
    #     x_train,
    #     y_train,
    #     batch_size=64,
    #     steps_per_epoch=5,
    #     epochs=30,
    #     verbose=0,
    #     callbacks=[LossAndErrorPrintingCallback(), EarlyStoppingAtMinLoss()],
    # )

    #############################
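    # Example 4 (active): loss printing plus the custom learning rate schedule.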
    model.fit(
        x_train,
        y_train,
        batch_size=64,
        steps_per_epoch=5,
        epochs=15,
        verbose=0,
        callbacks=[
            LossAndErrorPrintingCallback(),
            CustomLearningRateScheduler(lr_schedule),
        ],
    )


if __name__ == '__main__':
    test()
