1. Saving and restoring a TensorFlow model: tf.train.Saver().save()
# Saving a model is done with the Saver's save() method
save(
    sess,
    save_path,
    global_step=None,
    latest_filename=None,
    meta_graph_suffix='meta',
    write_meta_graph=True,
    write_state=True
)
'''
sess: saving requires a session with the computation graph loaded, and all variables must already be initialized.
save_path: path and base file name under which the model is saved.
global_step: if provided, this number is appended to save_path, distinguishing checkpoints from different training stages.
'''
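As a small, hedged illustration of global_step (assuming TensorFlow 1.x; the variable and the step value 100 are placeholders), passing it appends the step number to the checkpoint file names:

import os
import tensorflow as tf

step_demo = tf.Variable(0, name='step_demo')  # placeholder variable so there is something to save
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    os.makedirs('tmp', exist_ok=True)
    # Files are written as tmp/model.ckpt-100.* rather than tmp/model.ckpt.*
    saver.save(sess, 'tmp/model.ckpt', global_step=100)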
# Full example
import tensorflow as tf
import numpy as np
import os
# Generate training data with numpy
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # reshape to a (300, 1) column vector
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# Input layer
x_ph = tf.placeholder(tf.float32, [None, 1])
y_ph = tf.placeholder(tf.float32, [None, 1])
# Hidden layer
w1 = tf.Variable(tf.random_normal([1, 10]))
b1 = tf.Variable(tf.zeros([1, 10]) + 0.1)
wx_plus_b1 = tf.matmul(x_ph, w1) + b1
hidden = tf.nn.relu(wx_plus_b1)
# Output layer
w2 = tf.Variable(tf.random_normal([10, 1]))
b2 = tf.Variable(tf.zeros([1, 1]) + 0.1)
wx_plus_b2 = tf.matmul(hidden, w2) + b2
y = wx_plus_b2
# Loss (mean squared error)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(y_ph - y), axis=[1]))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Saver object for saving/restoring the model
saver = tf.train.Saver()
# Create the save directory if it does not already exist
if not os.path.exists('tmp/'):
    os.mkdir('tmp/')
# Restore or initialize, then train
with tf.Session() as sess:
    if os.path.exists('tmp/checkpoint'):          # does a saved model exist?
        saver.restore(sess, 'tmp/model.ckpt')     # yes: restore variables from it
    else:
        init = tf.global_variables_initializer()  # no: initialize variables
        sess.run(init)
    for i in range(1000):
        _, loss_value = sess.run([train_op, loss], feed_dict={x_ph: x_data, y_ph: y_data})
        if i % 50 == 0:
            save_path = saver.save(sess, 'tmp/model.ckpt')
            print("iteration: %d, training loss: %s" % (i, loss_value))
Each call to save() writes three data files plus one checkpoint file. Put simply, the weights and other parameters are saved, dictionary-style, in the .ckpt.data file (alongside a .ckpt.index file), while the graph and its metadata are saved in the .ckpt.meta file, which tf.train.import_meta_graph can load into the current default graph.
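A hedged sketch of that restore path (assuming TensorFlow 1.x and that the checkpoint written above exists under tmp/): the meta graph can be re-imported and the weights restored without rebuilding the model in code.

import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the graph from the saved meta file; import_meta_graph returns a Saver.
    restorer = tf.train.import_meta_graph('tmp/model.ckpt.meta')
    # Restore the variable values into the freshly imported graph.
    restorer.restore(sess, 'tmp/model.ckpt')
    # Tensors can then be fetched by name from the default graph; the name below
    # is an assumption that depends on how the original graph was constructed.
    graph = tf.get_default_graph()
    x_ph = graph.get_tensor_by_name('Placeholder:0')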
2. Saving and loading models in Keras
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# (60000, 28, 28)
print('x_shape:', x_train.shape)
# (60000,)
print('y_shape:', y_train.shape)
# (60000, 28, 28) -> (60000, 784), scaled to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# Build the model: 784 inputs, 10 outputs
model = Sequential([
    Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')
])
# Define the optimizer
sgd = SGD(lr=0.2)
# Configure the optimizer, the loss function, and an accuracy metric
model.compile(
    optimizer=sgd,
    loss='mse',
    metrics=['accuracy'],
)
# Train the model
model.fit(x_train, y_train, batch_size=64, epochs=5)
# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
# Save the whole model (architecture, weights, and optimizer state)
model.save('model.h5')
# Load the previously trained model and continue training
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.models import load_model
# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# (60000, 28, 28)
print('x_shape:', x_train.shape)
# (60000,)
print('y_shape:', y_train.shape)
# (60000, 28, 28) -> (60000, 784), scaled to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# Load the saved model
model = load_model('model.h5')
# Evaluate the loaded model
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
# Continue training
model.fit(x_train, y_train, batch_size=64, epochs=2)
# Evaluate again
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
# Save only the weights / load only the weights
model.save_weights('my_model_weights.h5')
model.load_weights('my_model_weights.h5')
# Save only the network architecture as JSON / rebuild the model from JSON
from keras.models import model_from_json
json_string = model.to_json()
model = model_from_json(json_string)
print(json_string)
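Note that model_from_json restores only the architecture, not the weights. A minimal sketch (file names follow the examples above; the optimizer/loss strings are assumptions) of pairing the JSON architecture with saved weights to get a usable model back:

from keras.models import model_from_json

# Rebuild the architecture from JSON, then load the weights saved earlier.
restored = model_from_json(json_string)
restored.load_weights('my_model_weights.h5')
# A model rebuilt this way must be compiled again before evaluate()/fit().
restored.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])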
# Third example: save the best model during training with a ModelCheckpoint callback
from __future__ import print_function
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
# Random seed for reproducibility
np.random.seed(1671)
# Network structure and training hyperparameters
NB_EPOCH = 40
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10
OPTIMIZER = SGD()
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2
RESHAPED = 784
# Load the data from a local mnist.npz file
def load_data(path="mnist.npz"):
    f = np.load(path)
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']
    f.close()
    return (x_train, y_train), (x_test, y_test)
# Call the function to load the data
(x_train, y_train), (x_test, y_test) = load_data()
# Preprocessing: reshape, cast, and normalize
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
# Print shapes
print('Training samples:', x_train.shape)
print('Testing samples:', x_test.shape)
# Convert class labels to one-hot encoding
y_train = np_utils.to_categorical(y_train, NB_CLASSES)
y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# Define the network structure
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
# Compile the model
model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])
# Set up the checkpoint callback; the target directory must exist before saving
import os
if not os.path.exists('saved_models'):
    os.makedirs('saved_models')
filepath = 'saved_models/weights-improvement-{epoch:02d}-{val_acc:.5f}.hdf5'
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=VERBOSE,
                             save_best_only=True, mode='max')
# Train the model, checkpointing on validation accuracy
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE,
                    validation_split=VALIDATION_SPLIT, callbacks=[checkpoint])
# Evaluate the model
score = model.evaluate(x_test, y_test, verbose=VERBOSE)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# With save_best_only=True, a checkpoint is written only when val_acc improves, so the last file written corresponds to the best validation accuracy.
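A hedged sketch of picking that best checkpoint up afterwards (the file name is hypothetical; it depends on which epoch actually scored best):

# Load the best checkpointed weights back into the same architecture.
# 'weights-improvement-37-0.97750.hdf5' is an illustrative file name.
model.load_weights('saved_models/weights-improvement-37-0.97750.hdf5')
model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])
score = model.evaluate(x_test, y_test, verbose=VERBOSE)
print('Best-checkpoint accuracy:', score[1])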
Common options for storing data: JSON files, CSV files, and the MySQL, Redis, and MongoDB databases.
Writing data to a txt/csv file with Python's print:
f = open('out.txt', 'w')
for i in range(0, 5):
    print(i, file=f)
f.close()
# The same pattern writes to a CSV file:
# f = open('results/csv/out.csv', 'w')
# for i in range(0, 5):
#     print(i, file=f)
# f.close()
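To round out the storage options listed above, a minimal sketch using Python's standard json and csv modules (the file names and sample records are illustrative):

import csv
import json

rows = [{'epoch': i, 'loss': 1.0 / (i + 1)} for i in range(5)]  # sample records

# JSON: dump the whole list of records in one call
with open('results.json', 'w') as f:
    json.dump(rows, f, indent=2)

# CSV: write a header row, then one row per record
with open('results.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['epoch', 'loss'])
    writer.writeheader()
    writer.writerows(rows)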