All the code in this post is available on GitHub: https://github.com/zhuzhuxia1994/CK-TensorFlow
I first came across two-stream convolutional neural networks while working on action recognition. The two streams are a temporal stream and a spatial stream: the temporal stream runs convolutions over optical-flow images, the spatial stream runs convolutions over RGB images, and the two results are then fused. Because the optical flow brings in temporal information, this usually works better than convolving RGB frames alone, especially for tasks like video action recognition. Without further ado, here are the code and the explanation.
The code below runs expression-recognition experiments on the CK+ database, which is available at http://www.consortium.ri.cmu.edu/ckagree/ (registration required). If you need the data, leave a comment under this post and I will share a Baidu Cloud link (like the post before asking, or I'll ignore you, hehe).
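Before diving into the code, here is the overall data flow it implements, as a quick orientation:
# Two-stream forward pass, schematically:
#   spatial stream:   RGB image  -> s_conv1 -> pool/lrn -> s_conv2 -> lrn/pool -> fc -> s_local3 (128-d)
#   temporal stream:  flow image -> T_conv1 -> pool/lrn -> T_conv2 -> lrn/pool -> fc -> T_local3 (128-d)
#   fusion:           local3 = s_local3 + T_local3  (element-wise sum)
#   head:             local3 -> local4 (fc) -> softmax_linear -> logits over n_classes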
import tensorflow as tf

#%%
def inference(s_images, T_images, batch_size, n_classes):
    '''Build the two-stream model
    Args:
        s_images: RGB image batch (spatial stream), 4D tensor, tf.float32, [batch_size, width, height, channels]
        T_images: optical-flow image batch (temporal stream), same shape and dtype
    Returns:
        output tensor with the computed logits, float, [batch_size, n_classes]
    '''
    #conv1, shape = [kernel size, kernel size, channels, kernel numbers]
    # spatial stream
    with tf.variable_scope('s_conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(s_images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        s_conv1 = tf.nn.relu(pre_activation, name=scope.name)
    #pool1 and norm1
    with tf.variable_scope('s_pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(s_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='s_pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='s_norm1')
    #conv2
    with tf.variable_scope('s_conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        s_conv2 = tf.nn.relu(pre_activation, name='s_conv2')
    #pool2 and norm2
    with tf.variable_scope('s_pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(s_conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='s_norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME', name='s_pooling2')
    #local3
    with tf.variable_scope('s_local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        s_local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    #local4
    #with tf.variable_scope('s_local4') as scope:
    #    weights = tf.get_variable('weights',
    #                              shape=[128, 128],
    #                              dtype=tf.float32,
    #                              initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
    #    biases = tf.get_variable('biases',
    #                             shape=[128],
    #                             dtype=tf.float32,
    #                             initializer=tf.constant_initializer(0.1))
    #    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='s_local4')
    # temporal stream
    with tf.variable_scope('T_conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(T_images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        T_conv1 = tf.nn.relu(pre_activation, name=scope.name)
    #pool1 and norm1
    with tf.variable_scope('T_pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(T_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='T_pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='T_norm1')
    #conv2
    with tf.variable_scope('T_conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        T_conv2 = tf.nn.relu(pre_activation, name='T_conv2')
    #pool2 and norm2
    with tf.variable_scope('T_pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(T_conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='T_norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                               padding='SAME', name='T_pooling2')
    #local3
    with tf.variable_scope('T_local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        T_local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    # fuse the two streams: element-wise sum of the two 128-d features
    local3 = s_local3 + T_local3
    #local4
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')
    # softmax
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
    return softmax_linear
#%%
def losses(logits, labels):
    '''Compute loss from logits and labels
    Args:
        logits: logits tensor, float, [batch_size, n_classes]
        labels: label tensor, tf.int32, [batch_size]
    Returns:
        loss tensor of float type
    '''
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss
#%%
def trainning(loss, learning_rate):
    '''Training op: the op returned by this function is what must be passed to
    `sess.run()` to cause the model to train.
    Args:
        loss: loss tensor, from losses()
    Returns:
        train_op: the op for training
    '''
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op
#%%
def evaluation(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    Args:
        logits: logits tensor, float - [batch_size, NUM_CLASSES].
        labels: labels tensor, int32 - [batch_size], with values in the
            range [0, NUM_CLASSES).
    Returns:
        A scalar float tensor with the fraction of examples (out of batch_size)
        that were predicted correctly.
    """
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        #correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        #accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
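Note that the in_top_k path matches the sparse integer labels that losses() also expects; the commented-out argmax version would only be correct for one-hot labels. A tiny sanity check of what in_top_k does (an illustrative example, not part of the project code):
logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
labels = tf.constant([1, 0])                 # sparse integer class indices, not one-hot
correct = tf.nn.in_top_k(logits, labels, 1)  # evaluates to [True, True]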
In the code, names beginning with s_ (such as s_conv1) belong to the spatial stream, which is trained on RGB images; names beginning with T_ (such as T_conv1) belong to the temporal stream, which is trained on optical-flow images. The results are fused at the fully connected layer. This completes the two-stream convolutional model; the number of layers in each stream can be modified, and so can the fusion itself, as sketched below.
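For example, one common variant (an assumption on my part, not what the code above does) is to concatenate the two 128-d stream features instead of summing them, and let local4 learn how to mix them:
# Hypothetical variant: concatenation fusion instead of element-wise addition.
local3 = tf.concat([s_local3, T_local3], axis=1)  # shape [batch_size, 256]
# local4's weights would then need shape=[256, 128] instead of [128, 128].
Next comes the data-input module, input_data.py: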
import tensorflow as tf
import numpy as np
import os

#train_dir = '/home/hrz/projects/tensorflow/My-TensorFlow-tutorials/cats_vs_dogs/data/train/'

def get_files(file_dir):
    angry = []
    label_angry = []
    happy = []
    label_happy = []
    surprised = []
    label_surprised = []
    disgusted = []
    label_disgusted = []
    fearful = []
    label_fearful = []
    sadness = []
    label_sadness = []
    for sub_file_dir in os.listdir(file_dir):
        if sub_file_dir == 'angry':
            for name in os.listdir(file_dir + '/' + sub_file_dir):
                angry.append(file_dir + '/' + sub_file_dir + '/' + name)
                label_angry.append(0)
        elif sub_file_dir == 'disgusted':
            for name in os.listdir(file_dir + '/' + sub_file_dir):
                disgusted.append(file_dir + '/' + sub_file_dir + '/' + name)
                label_disgusted.append(1)
        elif sub_file_dir == 'fearful':
            for name in os.listdir(file_dir + '/' + sub_file_dir):
                fearful.append(file_dir + '/' + sub_file_dir + '/' + name)
                label_fearful.append(2)
        elif sub_file_dir == 'happy':
            for name in os.listdir(file_dir + '/' + sub_file_dir):
                happy.append(file_dir + '/' + sub_file_dir + '/' + name)
                label_happy.append(3)
        elif sub_file_dir == 'sadness':
            for name in os.listdir(file_dir + '/' + sub_file_dir):
                sadness.append(file_dir + '/' + sub_file_dir + '/' + name)
                label_sadness.append(4)
        elif sub_file_dir == 'surprised':
            for name in os.listdir(file_dir + '/' + sub_file_dir):
                surprised.append(file_dir + '/' + sub_file_dir + '/' + name)
                label_surprised.append(5)
    print('Already!!', len(label_angry))
    image_list = np.hstack((angry, disgusted, fearful, happy, sadness, surprised))
    label_list = np.hstack((label_angry, label_disgusted, label_fearful, label_happy, label_sadness, label_surprised))
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]
    return image_list, label_list
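Note that get_files expects file_dir to directly contain six subfolders named exactly angry, disgusted, fearful, happy, sadness, and surprised, each holding the images of that class. A quick usage sketch (the path is a placeholder, not the author's):
# Hypothetical usage; replace the path with your own CK+ directory.
image_list, label_list = get_files('/path/to/CK+YuanTu')
# image_list: shuffled file paths; label_list: matching integer labels in [0, 5]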
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    '''
    Args:
        image: list type
        label: list type
        image_W: image width
        image_H: image height
        batch_size: batch size
        capacity: the maximum number of elements in the queue
    Returns:
        image_batch: 4D tensor [batch_size, width, height, 3], dtype=tf.float32
        label_batch: 1D tensor [batch_size], dtype=tf.int32
    '''
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    image = tf.image.decode_jpeg(image_contents, channels=3)
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])
    image_batch = tf.cast(image_batch, tf.float32)
    return image_batch, label_batch
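Keep in mind that get_batch only builds graph nodes: the returned tensors yield data only after the queue runners are started. A minimal sketch of pulling a single batch, assuming image_list and label_list come from get_files above:
# Minimal consumption sketch for the TF 1.x queue-based pipeline.
image_batch, label_batch = get_batch(image_list, label_list, 256, 256, 8, 10000)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    imgs, lbls = sess.run([image_batch, label_batch])  # imgs: (8, 256, 256, 3)
    coord.request_stop()
    coord.join(threads)
The training script below wires the two input pipelines into the model defined in model.py and runs the optimization: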
import os
import numpy as np
import tensorflow as tf
import input_data
import model

#%%
N_CLASSES = 6
IMG_W = 256  # resize the image; if the input image is too large, training will be very slow
IMG_H = 256
BATCH_SIZE = 8
CAPACITY = 10000
MAX_STEP = 800  # with current parameters, it is suggested to use MAX_STEP > 10k
learning_rate = 0.0001  # with current parameters, it is suggested to use learning rate < 0.0001
#%%
def run_training():
    # you need to change the directories to yours.
    s_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+YuanTu'
    T_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+/CK+X_mid'
    logs_train_dir = '/home/hrz/projects/tensorflow/emotion/ck+'
    s_train, s_train_label = input_data.get_files(s_train_dir)
    s_train_batch, s_train_label_batch = input_data.get_batch(s_train,
                                                              s_train_label,
                                                              IMG_W,
                                                              IMG_H,
                                                              BATCH_SIZE,
                                                              CAPACITY)
    T_train, T_train_label = input_data.get_files(T_train_dir)
    T_train_batch, T_train_label_batch = input_data.get_batch(T_train,
                                                              T_train_label,
                                                              IMG_W,
                                                              IMG_H,
                                                              BATCH_SIZE,
                                                              CAPACITY)
    train_logits = model.inference(s_train_batch, T_train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, s_train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train__acc = model.evaluation(train_logits, s_train_label_batch)
    summary_op = tf.summary.merge_all()  # merge all summary ops
    sess = tf.Session()  # create the session
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()  # saver for writing checkpoints
    sess.run(tf.global_variables_initializer())  # initialize all variables
    coord = tf.train.Coordinator()  # multi-thread coordinator
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start the queue runners
    # training loop
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train__acc])
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
                # run the summary op and write the result
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 800 == 0 or (step + 1) == MAX_STEP:
                # save the current model and weights to logs_train_dir; global_step is the current iteration
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()

run_training()
The training procedure above is fairly straightforward, so I won't go through it again; feel free to ask me if anything is unclear. The test script test.py is not shown here; it is included with everything else in my GitHub repository: https://github.com/zhuzhuxia1994/CK-TensorFlow