TensorFlow

Table of Contents

  • 1. Sequential Model
  • 2. Functional API
  • 3. Model Saving




1. Sequential Model

If the labels are not one-hot encoded, choose sparse_categorical_crossentropy as the loss; here the labels are one-hot encoded with to_categorical, so categorical_crossentropy is used.

import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from sklearn import datasets
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np

# data
digits = datasets.load_digits()
digits_X = digits.data
digits_y = digits.target
X_train, X_test, y_train, y_test = train_test_split(digits_X, digits_y, test_size=0.3, random_state=0)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# sequential model
model = tf.keras.Sequential()
model.add(Dense(64, input_shape=(64,), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=10, batch_size=5, validation_data=(X_test, y_test))
loss, acc = model.evaluate(X_test, y_test)

# predict: argmax reverses the one-hot encoding to recover the class index
predictions = model.predict(X_test)
print(np.argmax(predictions[0]), np.argmax(y_test[0]))

# plot
plt.plot(history.epoch, history.history.get('loss'), label='loss')
plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
plt.plot(history.epoch, history.history.get('accuracy'), label='accuracy')
plt.plot(history.epoch, history.history.get('val_accuracy'), label='val_accuracy')
plt.legend()
plt.show()
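
As noted above, if the labels stay as integer class indices (no to_categorical), the loss should be sparse_categorical_crossentropy instead. A minimal sketch of that variant, assuming the same digits data:

import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout
from sklearn import datasets
from sklearn.model_selection import train_test_split

# integer labels 0-9 are used directly, without one-hot encoding
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.3, random_state=0)

model = tf.keras.Sequential([
    Dense(64, input_shape=(64,), activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax'),
])
# sparse_categorical_crossentropy expects integer class indices as targets
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, batch_size=5, validation_data=(X_test, y_test))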




2. Functional API

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout
from sklearn import datasets
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np

# data
digits = datasets.load_digits()
digits_X = digits.data
digits_y = digits.target
X_train, X_test, y_train, y_test = train_test_split(digits_X, digits_y, test_size=0.3, random_state=0)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# functional API model
inputs = keras.Input(shape=(64,))
dense = Dense(64, activation='relu')(inputs)
dropout = Dropout(0.5)(dense)
outputs = Dense(10, activation='softmax')(dropout)
model = keras.Model(inputs, outputs)

model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=10, batch_size=5, validation_data=(X_test, y_test))
loss, acc = model.evaluate(X_test, y_test)

# predict: argmax reverses the one-hot encoding to recover the class index
predictions = model.predict(X_test)
print(np.argmax(predictions[0]), np.argmax(y_test[0]))

# plot
plt.plot(history.epoch, history.history.get('loss'), label='loss')
plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
plt.plot(history.epoch, history.history.get('accuracy'), label='accuracy')
plt.plot(history.epoch, history.history.get('val_accuracy'), label='val_accuracy')
plt.legend()
plt.show()
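
Compared with Sequential, the Functional API can also express non-linear topologies such as shared layers and multiple outputs. A minimal hypothetical sketch (not part of the original example; the head names 'digit' and 'is_odd' are made up) with two output heads on the same 64-feature input:

from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout

inputs = keras.Input(shape=(64,))
hidden = Dropout(0.5)(Dense(64, activation='relu')(inputs))   # shared hidden layer
digit_out = Dense(10, activation='softmax', name='digit')(hidden)   # which digit, 0-9
parity_out = Dense(1, activation='sigmoid', name='is_odd')(hidden)  # odd/even flag
model = keras.Model(inputs, [digit_out, parity_out])
model.compile(optimizer='adam',
              loss={'digit': 'sparse_categorical_crossentropy',
                    'is_odd': 'binary_crossentropy'})
# with integer labels y: model.fit(X, {'digit': y, 'is_odd': y % 2}, epochs=10)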




3. Model Saving

from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout
from sklearn import datasets
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical

# data
digits = datasets.load_digits()
digits_X = digits.data
digits_y = digits.target
X_train, X_test, y_train, y_test = train_test_split(digits_X, digits_y, test_size=0.3, random_state=0)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# functional API model
inputs = keras.Input(shape=(64,))
dense = Dense(64, activation='relu')(inputs)
dropout = Dropout(0.5)(dense)
outputs = Dense(10, activation='softmax')(dropout)
model = keras.Model(inputs, outputs)

model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=10, batch_size=5, validation_data=(X_test, y_test))

# model saving: architecture, weights and optimizer state go into one HDF5 file
model.save('my_model.h5')
# model loading
model = keras.models.load_model('my_model.h5')
print(model.evaluate(X_test, y_test))
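
Besides the single HDF5 file, TensorFlow 2.x can also save the whole model in the SavedModel format (a directory), or save just the weights. A short sketch continuing from the code above (the paths are placeholders):

# SavedModel format: pass a directory path instead of a .h5 file
model.save('my_model_dir')
model = keras.models.load_model('my_model_dir')

# weights only; the architecture must be rebuilt in code before loading
model.save_weights('my_weights.h5')
model.load_weights('my_weights.h5')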
