Baidu Cloud link: https://pan.baidu.com/s/1KWYrGVVS6He7lO7skyhgQQ
Extraction code: p2dd
数据集划分.py (dataset splitting script)
import os, random, shutil

def moveFile(fileDir, tarDir):
    pathDir = os.listdir(fileDir)                # list the source images
    filenumber = len(pathDir)
    rate = 0.2                                   # fraction to move, e.g. 0.2 moves 20 of every 100 images
    picknumber = int(filenumber * rate)          # number of images to sample at the given rate
    sample = random.sample(pathDir, picknumber)  # randomly pick picknumber images
    print(sample)
    for name in sample:
        shutil.move(os.path.join(fileDir, name), os.path.join(tarDir, name))

if __name__ == '__main__':
    fileDir = "D:\\神经网络\\Alexnet\\猫狗数据集\\train\\"  # source image folder
    tarDir = 'D:\\神经网络\\Alexnet\\猫狗数据集\\test6\\'   # destination folder
    moveFile(fileDir, tarDir)
Because the Dogs vs. Cats test set ships without labels, we sample 20% of the training set to serve as a test set instead, using the script above.
When I first trained, I had not normalized the training and test images to 224x224, and test accuracy was only 70%-80%. Looking for the cause, I dumped the images as seen during training and found that most of them had been cropped to 224x224 rather than scaled, so only part of each image survived; some showed nothing but a single cat leg. Training on such crops clearly cannot work. After preprocessing the images properly, accuracy improved significantly.
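The distinction is easy to demonstrate. The following minimal sketch (my own illustration using a hypothetical blank 500x400 image, not code from this post) contrasts the center-crop behavior of tf.image.resize_image_with_crop_or_pad, which the data pipeline below uses, with a full rescale via cv2.resize, which the preprocessing script uses:

import numpy as np
import cv2
import tensorflow as tf

img = np.zeros((500, 400, 3), dtype=np.uint8)  # hypothetical image larger than 224x224

# Center-crop/pad: only the central 224x224 window survives; the rest is discarded.
cropped = tf.image.resize_image_with_crop_or_pad(tf.constant(img), 224, 224)

# Full rescale: the entire image is kept, just shrunk to 224x224.
scaled = cv2.resize(img, (224, 224))

with tf.Session() as sess:
    print(sess.run(cropped).shape, scaled.shape)  # (224, 224, 3) (224, 224, 3)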
import cv2
import os

def rebuild(file_dir, save_dir):
    """Resize every image to 224x224."""
    print('Start to resize images...')
    for file in os.listdir(file_dir):
        file_path = os.path.join(file_dir, file)
        try:
            image = cv2.imread(file_path)
            image_resized = cv2.resize(image, (224, 224))
            save_path = os.path.join(save_dir, file)
            cv2.imwrite(save_path, image_resized)
        except Exception:
            # unreadable or corrupt image: report it and drop it from the dataset
            print(file_path)
            os.remove(file_path)
    print('Finished!')

rebuild('./train', './finaltrain/')
## './train' is the folder of raw training images; './finaltrain' is where the preprocessed images are written
After preprocessing the training set, preprocess the test set the same way, for example:
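A hedged one-liner (the destination folder name './finaltest/' is my own choice, not from the original post):

rebuild('./test6', './finaltest/')  # apply the same 224x224 preprocessing to the held-out test images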
input_data.py
import tensorflow as tf
import os
import numpy as np
#import model
os.environ['TF_CPP_MIN_LOG_LEVEL']='0'
def get_files(file_dir):
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        if 'cat' in name[0]:
            cats.append(file_dir + file)
            label_cats.append(0)
        elif 'dog' in name[0]:
            dogs.append(file_dir + file)
            label_dogs.append(1)
    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))
    #print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))
    # With more classes you would gather them all here the same way; two classes is enough for us.
    # Put images and labels into one temp array, shuffle it, then split them back out.
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    # shuffle
    np.random.shuffle(temp)
    # column 0 holds the image paths, column 1 the labels
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]
    return image_list, label_list
# test get_files
'''
imgs, label = get_files('D:\\神经网络\\Alexnet\\猫狗数据集\\finaltrain')
for i in imgs:
    print("img:", i)
for i in label:
    print('label:', i)
# test get_files end
'''
# image_W, image_H: target image size; batch_size: images per batch; capacity: max elements the queue may hold
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # cast the inputs to types TensorFlow can consume
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    # put image paths and labels into an input queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    # read the raw file bytes
    image_contents = tf.read_file(input_queue[0])
    # decode the JPEG; channels=3 for color images (1 would be grayscale)
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # center-crop or pad the image to image_W x image_H
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # standardize: subtract the mean and divide by the standard deviation
    image = tf.image.per_image_standardization(image)
    # assemble batches; tune num_threads to your machine; capacity bounds the queue size
    # (tf.train.shuffle_batch would also shuffle, but the file list was already shuffled in get_files)
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=64, capacity=capacity)
    # reshape label_batch to [batch_size]
    label_batch = tf.reshape(label_batch, [batch_size])
    # cast the images to float32
    image_batch = tf.cast(image_batch, tf.float32)
    return image_batch, label_batch
def one_hot(labels):
    '''one-hot encoding'''
    n_sample = len(labels)
    n_class = max(labels) + 1
    onehot_labels = np.zeros((n_sample, n_class))
    onehot_labels[np.arange(n_sample), labels] = 1
    return onehot_labels
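A quick sanity check of one_hot (hypothetical labels, run inside this module):

print(one_hot([0, 1, 1, 0]))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]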
'''
# test get_batch
import matplotlib.pyplot as plt

BATCH_SIZE = 40
CAPACITY = 256
IMG_W = 224
IMG_H = 224
train_dir = 'D:\\神经网络\\Alexnet\\猫狗数据集\\finaltrain\\'
image_list, label_list = get_files(train_dir)
image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
with tf.Session() as sess:
    i = 0
    # Coordinator and start_queue_runners watch the queue, enqueueing and dequeueing continuously
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # once coord.should_stop() returns True the data is exhausted, so coord.request_stop() should be called
    try:
        while not coord.should_stop() and i < 1:
            # run a single batch
            img, label = sess.run([image_batch, label_batch])
            for j in np.arange(BATCH_SIZE):
                print('label: %d' % label[j])
                #print(img)
                # img is a 4-D array, so index the first axis and slice the rest
                plt.imshow(img[j, :, :, :])
                plt.show()
            i += 1
    # the queue has run out of data
    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        coord.request_stop()
        coord.join(threads)
'''
train.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
#import creat_and_read_TFReacod as reader
import os
import input_data
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.InteractiveSession(config=config)
train_dir = 'D:\\神经网络\\Alexnet\\猫狗数据集\\finaltrain\\'  # path to the preprocessed training set
x_train, y_train = input_data.get_files(train_dir)
image_batch, label_batch = input_data.get_batch(x_train, y_train, 224, 224, 50, 512)
def conv2d(x, W, b, strides, pad):
    conv = tf.nn.conv2d(x, W, [1, strides, strides, 1], padding=pad)
    conv = tf.nn.bias_add(conv, b)
    conv = batch_norm(conv, True)
    return tf.nn.relu(conv)

def maxpool2d(x, k, s):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME')
# batch normalization
def batch_norm(inputs, is_train, is_conv_out=True, decay=0.999):
    scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))
    beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))
    pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)
    pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)
    if is_train:
        if is_conv_out:
            batch_mean, batch_var = tf.nn.moments(inputs, [0, 1, 2])
        else:
            batch_mean, batch_var = tf.nn.moments(inputs, [0])
        # keep running estimates of the population statistics for inference
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, scale, 0.001)
    else:
        return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
# model hyperparameters
learning_rate = 1e-5
training_iters = 8000
batch_size = 50
display_step = 10
n_classes = 2
n_fc1 = 4096
n_fc2 = 2048
# build the model
x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='x')
y = tf.placeholder(tf.float32, [None, n_classes], name='y')
# weights
weights = {
    'wc1_1': tf.Variable(tf.random_normal([3, 3, 3, 64], stddev=0.01)),
    'wc1_2': tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01)),
    'wc2_1': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01)),
    'wc2_2': tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01)),
    'wc3_1': tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01)),
    'wc3_2': tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01)),
    'wc3_3': tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01)),
    'wc4_1': tf.Variable(tf.random_normal([3, 3, 256, 512], stddev=0.01)),
    'wc4_2': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc4_3': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5_1': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5_2': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5_3': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wd1': tf.Variable(tf.random_normal([7*7*512, 4096], stddev=0.005)),  # FC1: 25088 -> 4096
    'wd2': tf.Variable(tf.random_normal([4096, 2048], stddev=0.005)),     # FC2: 4096 -> 2048
    'wd3': tf.Variable(tf.random_normal([2048, 1000], stddev=0.005)),     # FC3: 2048 -> 1000
    'wd4': tf.Variable(tf.random_normal([1000, 2], stddev=0.005))         # output layer: 1000 -> 2 classes
}
# biases
biases = {
    'bc1_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
    'bc1_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
    'bc2_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[128])),
    'bc2_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[128])),
    'bc3_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256])),
    'bc3_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256])),
    'bc3_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256])),
    'bc4_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc4_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc4_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc5_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc5_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc5_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bd1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[4096])),
    'bd2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[2048])),
    'bd3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[1000])),
    'bd4': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[2]))
}
x_image = tf.reshape(x, [-1, 224, 224, 3])  # note: unused; VGGnet reshapes x itself
# VGG-16 network
def VGGnet(x):
    x = tf.reshape(x, [-1, 224, 224, 3])
    conv1 = conv2d(x, weights['wc1_1'], biases['bc1_1'], 1, 'SAME')
    conv2 = conv2d(conv1, weights['wc1_2'], biases['bc1_2'], 1, 'SAME')
    conv2 = maxpool2d(conv2, 2, 2)
    conv3 = conv2d(conv2, weights['wc2_1'], biases['bc2_1'], 1, 'SAME')
    conv4 = conv2d(conv3, weights['wc2_2'], biases['bc2_2'], 1, 'SAME')
    conv4 = maxpool2d(conv4, 2, 2)
    conv5 = conv2d(conv4, weights['wc3_1'], biases['bc3_1'], 1, 'SAME')
    conv6 = conv2d(conv5, weights['wc3_2'], biases['bc3_2'], 1, 'SAME')
    conv7 = conv2d(conv6, weights['wc3_3'], biases['bc3_3'], 1, 'SAME')
    conv7 = maxpool2d(conv7, 2, 2)
    conv8 = conv2d(conv7, weights['wc4_1'], biases['bc4_1'], 1, 'SAME')
    conv9 = conv2d(conv8, weights['wc4_2'], biases['bc4_2'], 1, 'SAME')
    conv10 = conv2d(conv9, weights['wc4_3'], biases['bc4_3'], 1, 'SAME')
    conv10 = maxpool2d(conv10, 2, 2)
    conv11 = conv2d(conv10, weights['wc5_1'], biases['bc5_1'], 1, 'SAME')
    conv12 = conv2d(conv11, weights['wc5_2'], biases['bc5_2'], 1, 'SAME')
    conv13 = conv2d(conv12, weights['wc5_3'], biases['bc5_3'], 1, 'SAME')
    conv13 = maxpool2d(conv13, 2, 2)
    fc1 = tf.reshape(conv13, [-1, 7*7*512])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, 0.8)  # keep 80% of activations during training
    fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, 0.8)
    fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])
    fc3 = tf.nn.relu(fc3)
    fc3 = tf.nn.dropout(fc3, 0.8)
    out = tf.add(tf.matmul(fc3, weights['wd4']), biases['bd4'])
    return out
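As a sanity check on the 7*7*512 flatten size (plain arithmetic, my own addition): the five maxpool2d(x, 2, 2) stages halve the 224x224 input five times.

spatial = 224
for _ in range(5):  # one halving per pooling stage
    spatial //= 2
print(spatial, spatial * spatial * 512)  # 7 25088, matching the shape of weights['wd1']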
# define the loss and optimizer
out = VGGnet(x)
tf.add_to_collection('network', out)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=out))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# evaluate the model
correct_pred = tf.equal(tf.argmax(out, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
def onehot(labels):
    '''one-hot encoding (same as input_data.one_hot)'''
    n_sample = len(labels)
    n_class = max(labels) + 1
    onehot_labels = np.zeros((n_sample, n_class))
    onehot_labels[np.arange(n_sample), labels] = 1
    return onehot_labels
save_model = './modele/VGGNetModel.ckpt'  # checkpoints go into the ./modele folder
def train(opech):
    with tf.Session() as sess:
        sess.run(init)
        train_writer = tf.summary.FileWriter('./log', sess.graph)  # where the TensorBoard logs go
        saver = tf.train.Saver()
        c = []
        start_time = time.time()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        step = 0
        for i in range(opech):
            step = i
            image, label = sess.run([image_batch, label_batch])
            labels = onehot(label)
            sess.run(optimizer, feed_dict={x: image, y: labels})
            loss_record = sess.run(loss, feed_dict={x: image, y: labels})
            acc = sess.run(accuracy, feed_dict={x: image, y: labels})
            print("now the loss is %.6f " % loss_record)
            print("now the accuracy is %.6f " % acc)
            c.append(loss_record)
            end_time = time.time()
            print('time: ', (end_time - start_time))
            start_time = end_time
            print("---------------step %d is finished-------------------" % i)
            if step % 200 == 0:
                saver.save(sess, save_model, global_step=step)  # checkpoint every 200 steps
                print("Model Save Finished!")
        print("Optimization Finished!")
        coord.request_stop()
        coord.join(threads)
        # plot and save the loss curve
        plt.plot(c)
        plt.xlabel('Iter')
        plt.ylabel('loss')
        plt.title('lr=%f, ti=%d, bs=%d' % (learning_rate, training_iters, batch_size))
        plt.tight_layout()
        plt.savefig('cat_and_dog_AlexNet.jpg', dpi=200)

train(training_iters)
After training, the checkpoints are saved in the modele folder; all that remains is to load a saved model and run it on the test set.
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import input_data
from PIL import Image
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.InteractiveSession(config=config)
testfile = 'D:\\神经网络\\Alexnet\\猫狗数据集\\test6\\'
def conv2d(x, W, b, strides, pad):
    conv = tf.nn.conv2d(x, W, [1, strides, strides, 1], padding=pad)
    conv = tf.nn.bias_add(conv, b)
    conv = batch_norm(conv, True)  # note: this still uses batch statistics at test time, as in training
    return tf.nn.relu(conv)

def maxpool2d(x, k, s):
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME')
# batch normalization
def batch_norm(inputs, is_train, is_conv_out=True, decay=0.999):
    scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))
    beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))
    pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)
    pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)
    if is_train:
        if is_conv_out:
            batch_mean, batch_var = tf.nn.moments(inputs, [0, 1, 2])
        else:
            batch_mean, batch_var = tf.nn.moments(inputs, [0])
        # keep running estimates of the population statistics for inference
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, scale, 0.001)
    else:
        return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
n_classes = 2
n_fc1 = 4096
n_fc2 = 2048
# rebuild the model
x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='x')
y = tf.placeholder(tf.float32, [None, n_classes], name='y')
weights = {
    'wc1_1': tf.Variable(tf.random_normal([3, 3, 3, 64], stddev=0.01)),
    'wc1_2': tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01)),
    'wc2_1': tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01)),
    'wc2_2': tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01)),
    'wc3_1': tf.Variable(tf.random_normal([3, 3, 128, 256], stddev=0.01)),
    'wc3_2': tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01)),
    'wc3_3': tf.Variable(tf.random_normal([3, 3, 256, 256], stddev=0.01)),
    'wc4_1': tf.Variable(tf.random_normal([3, 3, 256, 512], stddev=0.01)),
    'wc4_2': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc4_3': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5_1': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5_2': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wc5_3': tf.Variable(tf.random_normal([3, 3, 512, 512], stddev=0.01)),
    'wd1': tf.Variable(tf.random_normal([7*7*512, 4096], stddev=0.005)),  # FC1: 25088 -> 4096
    'wd2': tf.Variable(tf.random_normal([4096, 2048], stddev=0.005)),     # FC2: 4096 -> 2048
    'wd3': tf.Variable(tf.random_normal([2048, 1000], stddev=0.005)),     # FC3: 2048 -> 1000
    'wd4': tf.Variable(tf.random_normal([1000, 2], stddev=0.005))         # output layer: 1000 -> 2 classes
}
# biases
biases = {
    'bc1_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
    'bc1_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[64])),
    'bc2_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[128])),
    'bc2_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[128])),
    'bc3_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256])),
    'bc3_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256])),
    'bc3_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[256])),
    'bc4_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc4_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc4_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc5_1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc5_2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bc5_3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[512])),
    'bd1': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[4096])),
    'bd2': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[2048])),
    'bd3': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[1000])),
    'bd4': tf.Variable(tf.constant(0.1, dtype=tf.float32, shape=[2]))
}
x_image = tf.reshape(x, [-1, 224, 224, 3])  # note: unused; VGGnet reshapes x itself
# VGG-16 network
def VGGnet(x):
    x = tf.reshape(x, [-1, 224, 224, 3])
    conv1 = conv2d(x, weights['wc1_1'], biases['bc1_1'], 1, 'SAME')
    conv2 = conv2d(conv1, weights['wc1_2'], biases['bc1_2'], 1, 'SAME')
    conv2 = maxpool2d(conv2, 2, 2)
    conv3 = conv2d(conv2, weights['wc2_1'], biases['bc2_1'], 1, 'SAME')
    conv4 = conv2d(conv3, weights['wc2_2'], biases['bc2_2'], 1, 'SAME')
    conv4 = maxpool2d(conv4, 2, 2)
    conv5 = conv2d(conv4, weights['wc3_1'], biases['bc3_1'], 1, 'SAME')
    conv6 = conv2d(conv5, weights['wc3_2'], biases['bc3_2'], 1, 'SAME')
    conv7 = conv2d(conv6, weights['wc3_3'], biases['bc3_3'], 1, 'SAME')
    conv7 = maxpool2d(conv7, 2, 2)
    conv8 = conv2d(conv7, weights['wc4_1'], biases['bc4_1'], 1, 'SAME')
    conv9 = conv2d(conv8, weights['wc4_2'], biases['bc4_2'], 1, 'SAME')
    conv10 = conv2d(conv9, weights['wc4_3'], biases['bc4_3'], 1, 'SAME')
    conv10 = maxpool2d(conv10, 2, 2)
    conv11 = conv2d(conv10, weights['wc5_1'], biases['bc5_1'], 1, 'SAME')
    conv12 = conv2d(conv11, weights['wc5_2'], biases['bc5_2'], 1, 'SAME')
    conv13 = conv2d(conv12, weights['wc5_3'], biases['bc5_3'], 1, 'SAME')
    conv13 = maxpool2d(conv13, 2, 2)
    fc1 = tf.reshape(conv13, [-1, 7*7*512])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, 1.0)  # keep_prob = 1.0: dropout is a no-op at inference
    fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, 1.0)
    fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])
    fc3 = tf.nn.relu(fc3)
    fc3 = tf.nn.dropout(fc3, 1.0)
    out = tf.add(tf.matmul(fc3, weights['wd4']), biases['bd4'])
    return out
out=VGGnet(x)
saver = tf.train.Saver()
with tf.device('/gpu:1'):
    def Evaluate(testfile):
        count = 0
        sums = 0
        start_time = time.time()
        with tf.Session() as sess:
            saver.restore(sess, './modele/VGGNetModel.ckpt-3200')  # path of the checkpoint to evaluate
            for root, sub_folders, files in os.walk(testfile):
                for name in files:
                    sums += 1
                    imagefile = os.path.join(root, name)
                    print(imagefile)
                    image = Image.open(imagefile)
                    image = image.resize([224, 224])
                    image_array = np.array(image)
                    # note: building these ops inside the loop grows the graph each iteration;
                    # acceptable for a quick test, but slow for large test sets
                    image = tf.cast(image_array, tf.float32)
                    image = tf.image.per_image_standardization(image)
                    image = tf.reshape(image, [1, 224, 224, 3])
                    image = sess.run(image)
                    prediction = sess.run(out, feed_dict={x: image})
                    end_time = time.time()
                    print('time: ', (end_time - start_time))
                    start_time = end_time
                    max_index = np.argmax(prediction)
                    if max_index == 0:
                        print("cat")
                    else:
                        print("dog")
                    # the file name prefix ('cat' or 'dog') is the ground-truth label
                    if max_index == 0 and name.split('.')[0] == 'cat':
                        count += 1
                    if max_index == 1 and name.split('.')[0] == 'dog':
                        count += 1
            print("correct predictions: ", count, "out of", sums)
            print("The accuracy is: ", count / sums)

Evaluate(testfile)
After training, the checkpoint folder fills with many files (the original screenshot is omitted here). We can restore different checkpoints and compare their test accuracy; around step 3000 worked best for me, reaching roughly 91%. The gap to stronger models is still sizeable, but I did not dig into it further or improve on this, since I also had a thesis to write.
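Instead of hard-coding a step number in saver.restore, a small convenience (standard TF1 API; this snippet is my own addition, not from the original post) is to ask for the newest checkpoint in the folder:

ckpt = tf.train.latest_checkpoint('./modele')  # e.g. './modele/VGGNetModel.ckpt-3200'
saver.restore(sess, ckpt)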
The loss-vs-iteration plot (saved as cat_and_dog_AlexNet.jpg by train.py) shows convergence at roughly iterations 2600-3000.
Because this was done in a hurry, accuracy sits around 90%-92%, which is not especially high. If anything is unclear, leave a comment and I will answer each one; and if any expert has a better way to improve this, please do share it.