What you need to download:
Dataset: the 17flowers dataset
Pre-trained weights: vgg16.npy (extraction code: 66im)

My approach was to cut 5 images out of each of the 17 class folders of the 17flowers dataset to serve as the test set, as sketched below.
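Here is a minimal sketch of that split (my own helper, not one of the original scripts), assuming the dataset was unpacked to ./17flowers with one numbered folder per class and the test images go to ./test; adjust both paths to your layout:

```python
import os
import random
import shutil

# Move 5 randomly chosen images out of each class folder into ./test,
# prefixing each file name with its class id so the ground truth stays visible.
src_root = './17flowers'
test_root = './test'
if not os.path.exists(test_root):
    os.makedirs(test_root)
for cls in os.listdir(src_root):
    cls_dir = os.path.join(src_root, cls)
    if not os.path.isdir(cls_dir):
        continue
    for img_name in random.sample(os.listdir(cls_dir), 5):
        shutil.move(os.path.join(cls_dir, img_name),
                    os.path.join(test_root, cls + '_' + img_name))
```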
I use TFRecord because it is a highly efficient data-reading format built for TensorFlow. While learning about TFRecord I came across plenty of excellent material, for example: 你可能无法回避的 TFRecord 文件格式详细讲解.
Here is the script that packs the images into a TFRecord file:
```python
import os
import tensorflow as tf
from PIL import Image

def creat_tf(imgpath):
    cwd = os.getcwd()
    classes = os.listdir(cwd + imgpath)
    # Open the tfrecords file the examples will be written to
    writer = tf.python_io.TFRecordWriter("train.tfrecords")
    for index, name in enumerate(classes):
        class_path = cwd + imgpath + name + "/"
        print(class_path)
        if os.path.isdir(class_path):
            for img_name in os.listdir(class_path):
                img_path = class_path + img_name
                img = Image.open(img_path)
                img = img.convert('RGB')  # guard: make sure every image has 3 channels
                img = img.resize((224, 224))
                img_raw = img.tobytes()
                # The folder name doubles as the integer class label
                example = tf.train.Example(features=tf.train.Features(feature={
                    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(name)])),
                    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
                }))
                writer.write(example.SerializeToString())
                print(img_name)
    writer.close()

def read_example():
    # Read the records back to check what was written
    for serialized_example in tf.python_io.tf_record_iterator("train.tfrecords"):
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        # image = example.features.feature['img_raw'].bytes_list.value
        label = example.features.feature['label'].int64_list.value
        print(label)

if __name__ == '__main__':
    imgpath = '/17flowers/'
    creat_tf(imgpath)
```
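If you want a quick sanity check beyond calling read_example(), a tally like the following counts how many records each class ended up with. The 75-per-class expectation is my assumption based on the stock 17flowers layout of 80 images per folder, minus the 5 moved out as test images:

```python
import collections
import tensorflow as tf

# Count the records per class label stored in train.tfrecords (TF 1.x API).
counts = collections.Counter()
for serialized in tf.python_io.tf_record_iterator("train.tfrecords"):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    counts[example.features.feature['label'].int64_list.value[0]] += 1
print(counts)  # expect 75 per class: 80 images per folder minus 5 test images
```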
Next comes the network definition. The training script below does `from VGG16 import *`, so save this file as VGG16.py:

```python
import tensorflow as tf
import numpy as np

# Load the pre-trained weights (NumPy >= 1.16.3 also needs allow_pickle=True here)
data_dict = np.load('./vgg16.npy', encoding='latin1', allow_pickle=True).item()

# Print a layer's name and output shape
def print_layer(t):
    print(t.op.name, ' ', t.get_shape().as_list(), '\n')

# 3x3 convolution layer, stride 1, SAME padding
def conv(x, d_out, name, fineturn=False, xavier=False):
    d_in = x.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        # Fine-tuning: reuse the pre-trained kernel and bias as frozen constants
        if fineturn:
            kernel = tf.constant(data_dict[name][0], name="weights")
            bias = tf.constant(data_dict[name][1], name="bias")
            print("fineturn")
        elif not xavier:
            kernel = tf.Variable(tf.truncated_normal([3, 3, d_in, d_out], stddev=0.1), name='weights')
            bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[d_out]),
                               trainable=True,
                               name='bias')
            print("truncated_normal")
        else:
            kernel = tf.get_variable(scope + 'weights', shape=[3, 3, d_in, d_out],
                                     dtype=tf.float32,
                                     initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bias = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[d_out]),
                               trainable=True,
                               name='bias')
            print("xavier")
        conv = tf.nn.conv2d(x, kernel, [1, 1, 1, 1], padding='SAME')
        activation = tf.nn.relu(conv + bias, name=scope)
        print_layer(activation)
        return activation

# 2x2 max-pooling layer, stride 2
def maxpool(x, name):
    activation = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID', name=name)
    print_layer(activation)
    return activation

# Fully connected layer
def fc(x, n_out, name, fineturn=False, xavier=False):
    n_in = x.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        if fineturn:
            weight = tf.constant(data_dict[name][0], name="weights")
            bias = tf.constant(data_dict[name][1], name="bias")
            print("fineturn")
        elif not xavier:
            weight = tf.Variable(tf.truncated_normal([n_in, n_out], stddev=0.01), name='weights')
            bias = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[n_out]),
                               trainable=True,
                               name='bias')
            print("truncated_normal")
        else:
            weight = tf.get_variable(scope + 'weights', shape=[n_in, n_out],
                                     dtype=tf.float32,
                                     initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bias = tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[n_out]),
                               trainable=True,
                               name='bias')
            print("xavier")
        # Note: relu_layer applies a ReLU, so even the fc8 logits pass through ReLU here
        activation = tf.nn.relu_layer(x, weight, bias, name=name)
        print_layer(activation)
        return activation

def VGG16(images, _dropout, n_cls):
    # The convolutional layers reuse the frozen pre-trained weights (fineturn=True);
    # only the fully connected layers are trained from scratch (xavier=True)
    conv1_1 = conv(images, 64, 'conv1_1', fineturn=True)
    conv1_2 = conv(conv1_1, 64, 'conv1_2', fineturn=True)
    pool1 = maxpool(conv1_2, 'pool1')
    conv2_1 = conv(pool1, 128, 'conv2_1', fineturn=True)
    conv2_2 = conv(conv2_1, 128, 'conv2_2', fineturn=True)
    pool2 = maxpool(conv2_2, 'pool2')
    conv3_1 = conv(pool2, 256, 'conv3_1', fineturn=True)
    conv3_2 = conv(conv3_1, 256, 'conv3_2', fineturn=True)
    conv3_3 = conv(conv3_2, 256, 'conv3_3', fineturn=True)
    pool3 = maxpool(conv3_3, 'pool3')
    conv4_1 = conv(pool3, 512, 'conv4_1', fineturn=True)
    conv4_2 = conv(conv4_1, 512, 'conv4_2', fineturn=True)
    conv4_3 = conv(conv4_2, 512, 'conv4_3', fineturn=True)
    pool4 = maxpool(conv4_3, 'pool4')
    conv5_1 = conv(pool4, 512, 'conv5_1', fineturn=True)
    conv5_2 = conv(conv5_1, 512, 'conv5_2', fineturn=True)
    conv5_3 = conv(conv5_2, 512, 'conv5_3', fineturn=True)
    pool5 = maxpool(conv5_3, 'pool5')
    # After five 2x2 poolings a 224x224 input is down to 7x7x512
    flatten = tf.reshape(pool5, [-1, 7 * 7 * 512])
    fc6 = fc(flatten, 4096, 'fc6', xavier=True)
    dropout1 = tf.nn.dropout(fc6, _dropout)
    fc7 = fc(dropout1, 4096, 'fc7', xavier=True)
    dropout2 = tf.nn.dropout(fc7, _dropout)
    fc8 = fc(dropout2, n_cls, 'fc8', xavier=True)
    return fc8
```
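If you are curious what vgg16.npy actually holds, a short inspection like this prints every stored layer with its parameter shapes. As the code above relies on, each key is a layer name ('conv1_1' through 'fc8') mapping to a [weights, biases] pair:

```python
import numpy as np

# List the layers stored in the pre-trained weight file and their parameter shapes.
data_dict = np.load('./vgg16.npy', encoding='latin1', allow_pickle=True).item()
for name in sorted(data_dict):
    print(name, [np.shape(p) for p in data_dict[name]])
```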
The training script:

```python
# coding=utf-8
import os
import tensorflow as tf
import numpy as np
from datetime import datetime
from VGG16 import *

batch_size = 5
lr = 0.0001
n_cls = 17
max_steps = 100

def read_and_decode(filename):
    # Build a filename queue from the tfrecords file
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns (filename, record)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [224, 224, 3])
    # Cast to float32 (uncomment the factor to normalize to [0, 1])
    img = tf.cast(img, tf.float32)  # * (1. / 255)
    label = tf.cast(features['label'], tf.int64)
    return img, label

def train():
    scalar_summary = tf.summary.scalar
    merge_summary = tf.summary.merge
    SummaryWriter = tf.summary.FileWriter

    x = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3], name='input')
    y = tf.placeholder(dtype=tf.float32, shape=[None, n_cls], name='label')
    keep_prob = tf.placeholder(tf.float32)
    output = VGG16(x, keep_prob, n_cls)

    with tf.name_scope('loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
        loss_summary = scalar_summary('loss', loss)
    train_step = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(loss)
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(y, 1)), tf.float32))
        acc_summary = scalar_summary('accuracy', accuracy)
    merged = merge_summary([loss_summary, acc_summary])

    images, labels = read_and_decode('./train.tfrecords')
    img_batch, label_batch = tf.train.shuffle_batch([images, labels],
                                                    batch_size=batch_size,
                                                    capacity=392,
                                                    min_after_dequeue=200)
    label_batch = tf.one_hot(label_batch, n_cls, 1, 0)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # Write the loss and accuracy summaries into the logs folder for TensorBoard
        writer = SummaryWriter('logs', sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(max_steps):
            batch_x, batch_y = sess.run([img_batch, label_batch])
            _, loss_val, summary = sess.run([train_step, loss, merged],
                                            feed_dict={x: batch_x, y: batch_y, keep_prob: 0.8})
            writer.add_summary(summary, i)
            if i % 10 == 0:
                train_arr = accuracy.eval(feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
                print("%s: Step [%d] Loss : %f, training accuracy : %g" % (datetime.now(), i, loss_val, train_arr))
            # The model is only saved once training finishes; change this to save every N steps if you prefer
            if (i + 1) == max_steps:
                if not os.path.exists('./model'):
                    os.makedirs('./model')  # Saver.save fails if the directory is missing
                saver.save(sess, './model/model.ckpt', global_step=i)
        coord.request_stop()
        coord.join(threads)

if __name__ == '__main__':
    train()
```
Finally, the test script. It rebuilds the graph, restores the checkpoint, and classifies every image in a folder:

```python
import tensorflow as tf
import numpy as np
from VGG16 import *
import cv2
import os

def test(path):
    x = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3], name='input')
    keep_prob = tf.placeholder(tf.float32)
    output = VGG16(x, keep_prob, 17)
    score = tf.nn.softmax(output)
    f_cls = tf.argmax(score, 1)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # Location of the trained model; the step suffix must match the checkpoint you saved
    # (with max_steps = 100 above, the training script writes ./model/model.ckpt-99)
    saver.restore(sess, './model/model.ckpt-99')
    for i in os.listdir(path):
        imgpath = os.path.join(path, i)
        im = cv2.imread(imgpath)
        im = cv2.resize(im, (224, 224))  # * (1. / 255)
        # cv2 loads images as BGR while training used PIL's RGB order, so convert first
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        im = np.expand_dims(im, axis=0)
        # At test time keep_prob is set to 1.0
        pred, _score = sess.run([f_cls, score], feed_dict={x: im, keep_prob: 1.0})
        prob = round(np.max(_score), 4)
        print("{} flowers class is: {}, score: {}".format(i, int(pred), prob))
    sess.close()

if __name__ == '__main__':
    # The test images live in this folder; the digits at the front of each
    # file name give the true class
    path = '/home/vision/Tensorflowlearning/fine-tune/test/'
    test(path)
```
As for the code, just adjust the paths and it will run; I got all of it working end to end.
You can then launch TensorBoard to look at the loss curve and the accuracy curve.
To do so, open a terminal in the project directory and run `tensorboard --logdir=logs`,
where logs is the folder under the project directory that holds the loss and accuracy data.
The curves let you judge whether training has converged.