I only recently started learning machine learning, and working through the TensorFlow实战Google book has been a real headache: it is full of old code that you have to fix by hand. I have finally finished converting the book's entire handwritten-digit recognition example; it runs on TensorFlow 2.1 (tested), and I am sharing it for everyone who has been stuck on errors!
If you cannot follow the code without comments, see the link below. The only differences from that version are the changes needed to run on 2.1, plus the file paths, which I changed to my own.
Tensorflow学习–最佳的深度学习实践案例
If you want to upgrade the code to the 2.x API yourself, run
tf_upgrade_v2 --infile ***.py --outfile ***_update.py
Replace the asterisks with your file name, and run the command from a console opened in the directory that contains the file you want to upgrade. The command takes filename.py, converts it, and generates filename_update.py, which is the converted file.
Then open the converted file and add the line below at the top; disabling the v2 (eager) behavior is all it takes.
tf.compat.v1.disable_v2_behavior()
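For example, suppose the book's script were saved as mnist_sample.py (a made-up name, just for illustration). Open a console in the folder that holds mnist_sample.py, run
tf_upgrade_v2 --infile mnist_sample.py --outfile mnist_sample_update.py
and then edit mnist_sample_update.py so that its first lines read
import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
After that, the rest of the converted v1-style graph code runs under TensorFlow 2.x.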
If Python complains that it cannot find the tutorials module, see this link:
Tensorflow 2.0 !!! No module named ‘tensorflow.examples.tutorials’
In short: follow the link, find the author's Weiyun share, download the tutorials folder, and copy it into \Lib\site-packages\tensorflow_core\examples.
Where that Lib directory lives depends on how you installed your environment. I use Anaconda3 with a virtual environment named tsfgpu, so for me the path is
Anaconda3\envs\tsfgpu\Lib\site-packages\tensorflow_core\examples
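If you are not sure where your site-packages directory is, a quick check from inside the environment works (this is plain standard-library Python, nothing TensorFlow-specific):
import site
print(site.getsitepackages())
# one of the printed paths ends in \Lib\site-packages;
# copy the tutorials folder into <that path>\tensorflow_core\examples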
If you do not know where to get the MNIST data set or where to put it, see this link:
如何导入MNIST数据集
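For reference, read_data_sets expects the four standard MNIST archives in the directory you pass to it (it will try to download them if they are missing) and splits them into 55000/5000/10000 examples:
from tensorflow.examples.tutorials.mnist import input_data

# the folder should hold train-images-idx3-ubyte.gz, train-labels-idx1-ubyte.gz,
# t10k-images-idx3-ubyte.gz and t10k-labels-idx1-ubyte.gz
mnist = input_data.read_data_sets("./traindata/", one_hot=True)
print(mnist.train.num_examples)       # 55000
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000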
Here is the complete converted code. If you want to run it, just change the paths it references to your own.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.compat.v1.disable_v2_behavior()

# Network structure and training hyper-parameters
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99


def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    # Forward pass of a two-layer network; if a moving-average class is given,
    # use the shadow (averaged) values of the weights and biases instead.
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)


def train(mnist):
    x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, INPUT_NODE], name='x-input')
    y_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, OUTPUT_NODE], name='y-input')

    weights1 = tf.Variable(tf.random.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[1, LAYER1_NODE]))
    weights2 = tf.Variable(tf.random.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[1, OUTPUT_NODE]))

    # Output without moving averages (used for the loss)
    y = inference(x, None, weights1, biases1, weights2, biases2)

    global_step = tf.Variable(0, trainable=False)

    # Moving averages of all trainable variables; average_y is used for evaluation
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.compat.v1.trainable_variables())
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # Cross-entropy loss plus L2 regularization on the weights
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(input=y_, axis=1))
    cross_entropy_mean = tf.reduce_mean(input_tensor=cross_entropy)
    regularizer = tf.keras.regularizers.l2(0.5 * (REGULARIZATION_RATE))
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization

    # Exponentially decaying learning rate
    learning_rate = tf.compat.v1.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )
    train_step = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # One op that both applies the gradients and updates the moving averages
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    correct_prediction = tf.equal(tf.argmax(input=average_y, axis=1), tf.argmax(input=y_, axis=1))
    accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_prediction, tf.float32))

    with tf.compat.v1.Session() as sess:
        tf.compat.v1.global_variables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}

        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy "
                      "using average model is %g" % (i, validate_acc))
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})

        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))


def main(argv=None):
    mnist = input_data.read_data_sets("./traindata/", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.compat.v1.app.run()
If you cannot follow the code and need a commented version, see this link:
TensorFlow学习之实现MNIST识别(实现断点重训)-----详细注解版
Below is my own code with the paths changed and upgraded to run on TensorFlow 2.1. As before, change the paths before you use it.
mnist_inference_update
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    # Create (or fetch) a weight variable and add its regularization loss
    # to the 'losses' collection so the training script can pick it up.
    weights = tf.compat.v1.get_variable("weights", shape, initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.compat.v1.add_to_collection('losses', regularizer(weights))
    return weights


def inference(input_tensor, regularizer):
    # Two-layer fully connected network: 784 -> 500 -> 10
    with tf.compat.v1.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.compat.v1.get_variable("biases", [LAYER1_NODE], initializer=tf.compat.v1.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.compat.v1.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.compat.v1.get_variable("biases", [OUTPUT_NODE], initializer=tf.compat.v1.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
mnist_train_update
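mnist_train_update is the training script. Below is a minimal sketch of what it looks like after the same upgrade, assuming the book's standard three-file layout; treat it as a template rather than a verbatim tested file. The two scripts further down only rely on it defining MOVING_AVERAGE_DECAY and MODEL_SAVE_PATH and on checkpoints being saved under MODEL_SAVE_PATH, so adjust the details and the data path to your own setup.
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference_update

tf.compat.v1.disable_v2_behavior()

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "MNIST_model/"   # checkpoint directory; change it to your own path
MODEL_NAME = "mnist_model"


def train(mnist):
    x = tf.compat.v1.placeholder(tf.float32, [None, mnist_inference_update.INPUT_NODE], name='x-input')
    y_ = tf.compat.v1.placeholder(tf.float32, [None, mnist_inference_update.OUTPUT_NODE], name='y-input')

    regularizer = tf.keras.regularizers.l2(0.5 * (REGULARIZATION_RATE))
    y = mnist_inference_update.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Moving averages of the trainable variables; the evaluation scripts restore these shadow values
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.compat.v1.trainable_variables())

    # Cross-entropy plus the L2 terms collected in mnist_inference_update
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(input=y_, axis=1))
    cross_entropy_mean = tf.reduce_mean(input_tensor=cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.compat.v1.get_collection('losses'))

    learning_rate = tf.compat.v1.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
        tf.compat.v1.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                # The step number is appended to the checkpoint file name, e.g. mnist_model-1000
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    os.makedirs(MODEL_SAVE_PATH, exist_ok=True)   # make sure the checkpoint folder exists
    mnist = input_data.read_data_sets("./traindata/", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.compat.v1.app.run()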
mnist_eval_update
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference_update
import mnist_train_update

tf.compat.v1.disable_v2_behavior()

EVAL_INTERVAL_SECS = 10          # re-check the latest checkpoint every 10 seconds
MODEL_SAVE_PATH = "MNIST_model/"
MODEL_NAME = "mnist_model"


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.compat.v1.placeholder(tf.float32, [None, mnist_inference_update.INPUT_NODE], name='x-input')
        y_ = tf.compat.v1.placeholder(tf.float32, [None, mnist_inference_update.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference_update.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(input=y, axis=1), tf.argmax(input=y_, axis=1))
        accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average (shadow) values of the trained variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train_update.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.compat.v1.train.Saver(variables_to_restore)

        while True:
            with tf.compat.v1.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train_update.MODEL_SAVE_PATH)
                sess.run(tf.compat.v1.global_variables_initializer())
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The global step is encoded in the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
            time.sleep(EVAL_INTERVAL_SECS)


def main(argv=None):
    mnist = input_data.read_data_sets("../traindata/", one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    tf.compat.v1.app.run()
First, we need to be clear about what an MNIST image actually looks like.
The images are grayscale, 28*28 pixels, and they are white digits on a black background. Yes, black background and white digit!
(The sample shown here is one I drew in Paint, white on black.)
If your own image is a black digit on a white background, then after you flatten it into a 1-row, 784-column matrix, say image a becomes matrix b, you must use 1 - b instead.
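A minimal sketch of that inversion (the helper name here is mine, not part of the original code); translate() below already scales pixel values to [0, 1], so white-on-black and black-on-white are exact mirror images:
import numpy as np

def invert_to_mnist_style(b):
    # b: 1x784 array in [0, 1] taken from a white-background, black-digit image.
    # MNIST wants a black background and a white digit, so flip every pixel.
    return 1.0 - np.asarray(b)

# e.g.  x = invert_to_mnist_style(translate(IMAGE_PATH))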
If the translate() function used for the image preprocessing is unclear, see this link:
【手写数字图片预处理】用skimage将自己的手写数字转换为mnist数据集
If you do not have the skimage package, install it yourself. I use Anaconda; after activating my virtual environment tsfgpu, I run this in the console:
pip install -U scikit-image -i https://pypi.tuna.tsinghua.edu.cn/simple
Remember to use the Tsinghua mirror:
pypi 镜像使用帮助
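After installing, a quick sanity check from the same environment confirms it is importable:
import skimage
print(skimage.__version__)   # any recent version works for the io / color / transform calls below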
Finally, here is my complete code for testing a single image. It is adapted from mnist_eval_update; change the paths and it will work.
The output is the prediction vector: one row, ten columns. The columns correspond to the digits 0-9, and the largest entry is the predicted digit.
For example, given this result:
[[18.982655 -3.8378751 1.0982409 -4.460092 -5.644824 -1.3264737
-2.2123477 1.0031121 -8.763886 4.256794 ]]
the first entry, about 18.98, is the largest of the ten, so the prediction is the digit 0!
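Rather than reading the ten numbers by eye, you can let numpy pick the largest entry; using the result above:
import numpy as np

outresult = np.array([[18.982655, -3.8378751, 1.0982409, -4.460092, -5.644824,
                       -1.3264737, -2.2123477, 1.0031121, -8.763886, 4.256794]])
print(int(np.argmax(outresult, axis=1)[0]))   # prints 0 -> the predicted digit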
import time
import tensorflow as tf
from skimage import io, data, transform, color
import numpy as np
import mnist_inference_update
import mnist_train_update

tf.compat.v1.disable_v2_behavior()

MNIST_SIZE = 28
IMAGE_PATH = "./image/w0.PNG"     # path to your own handwritten-digit image
EVAL_INTERVAL_SECS = 10
MODEL_SAVE_PATH = "MNIST_model/"
MODEL_NAME = "mnist_model"


def translate(image_path):
    # Read the image, convert it to grayscale, resize it to 28x28
    # and flatten it into a 1x784 row vector with values in [0, 1]
    img = io.imread(image_path)
    img_gray = color.rgb2gray(img)
    translated_img = transform.resize(img_gray, (MNIST_SIZE, MNIST_SIZE))
    flatten_img = np.reshape(translated_img, 784)
    imgarray = np.array([flatten_img])
    result = np.around(imgarray, decimals=3)
    print(result)
    return result


def evaluate():
    with tf.Graph().as_default() as g:
        x = translate(image_path=IMAGE_PATH)
        y = mnist_inference_update.inference(tf.cast(x, tf.float32), None)

        # Restore the moving-average values of the trained variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train_update.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.compat.v1.train.Saver(variables_to_restore)

        with tf.compat.v1.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train_update.MODEL_SAVE_PATH)
            sess.run(tf.compat.v1.global_variables_initializer())
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                outresult = sess.run(y)     # 1x10 row of scores, one per digit 0-9
                print(outresult)
            else:
                print('No checkpoint file found')
                time.sleep(EVAL_INTERVAL_SECS)


def main(argv=None):
    evaluate()


if __name__ == '__main__':
    tf.compat.v1.app.run()