To install tensorflow-gpu on Windows, refer to the "windows + tensorflow-gpu + anaconda3 + cuda8.0 + cudnn" installation guide: https://blog.csdn.net/hdd0411/article/details/71305931?locationNum=8&fps=1.
2.1 Step-by-step calling guide:
Create a new VS2013 project. Copy the libs and include folders from the anaconda3 installation directory into the .sln directory, and add them to the project's Include Directories and Library Directories. In the libs folder, make a copy of python36.lib named python36_d.lib (a Debug build links against the _d version by default) and add it under Properties -> Linker (Additional Dependencies). Copy python36.dll from the anaconda3 folder into the Debug output folder so that the .exe and python36.dll sit in the same Debug directory.
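If you prefer not to keep a renamed copy of python36.lib, a commonly used alternative is to temporarily undefine _DEBUG around the Python.h include, so that even a Debug build links against the release python36.lib. A minimal sketch (assuming Python.h is taken from the copied anaconda3 include folder):

// Include Python.h without pulling in the debug python36_d.lib.
// With _DEBUG defined, Python.h requests python36_d.lib via #pragma comment(lib, ...),
// so we hide _DEBUG just for this include.
#ifdef _DEBUG
#undef _DEBUG
#include <Python.h>
#define _DEBUG
#else
#include <Python.h>
#endif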
C++ code (calling the facial expression recognition model):
#include <Windows.h>
#include <Python.h>
#include <iostream>
using namespace std;

DWORD WINAPI testImage(LPVOID lParam)
{
    PyObject* pFunc = NULL;
    PyObject* pArg = NULL;
    PyObject* module = NULL;
    Py_Initialize();
    // "predictd" is the name of the Python file (predictd.py)
    module = PyImport_ImportModule("predictd");
    if (!module) {
        printf("cannot open module!");
        Py_Finalize();
        return 1;
    }
    // "test_one_image" is the function defined in predictd.py
    pFunc = PyObject_GetAttrString(module, "test_one_image");
    if (!pFunc) {
        printf("cannot open FUNC!");
        Py_Finalize();
        return 1;
    }
    // Call the model: pass the image path as a single string argument
    pArg = Py_BuildValue("(s)", "00028.jpg");
    PyGILState_STATE gstate = PyGILState_Ensure();
    PyEval_CallObject(pFunc, pArg);
    PyGILState_Release(gstate);
    return 0;
}

int main()
{
    cout << "Creating thread" << endl;
    CreateThread(NULL, 0, testImage, 0, 0, NULL);
    cout << "Thread created" << endl;
    system("pause");
    return 0;
}
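Note that the call above discards the PyObject* returned by PyEval_CallObject; in this article the result is instead recovered from log.txt (section 2.2). If test_one_image were changed to return its result (e.g. as a Python list of floats), the probabilities could be read back in C++ directly. A minimal sketch of that variant, not part of the original code:

// Hypothetical: assumes test_one_image ends with something like "return result[0].tolist()"
PyObject* pRet = PyEval_CallObject(pFunc, pArg);
if (pRet && PyList_Check(pRet)) {
    Py_ssize_t n = PyList_Size(pRet);
    for (Py_ssize_t i = 0; i < n; i++) {
        double p = PyFloat_AsDouble(PyList_GetItem(pRet, i));  // borrowed reference
        printf("%f\n", p);
    }
}
Py_XDECREF(pRet);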
Python code: create a predictd.py file and place it in the same folder as python36.dll.
import tensorflow as tf
import numpy as np
import os, glob, cv2
import sys, argparse
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# First, pass the path of the image
def test_one_image(test_dir):
    # Redirect stdout to log.txt so the C++ side can read the result back
    stdout_backup = sys.stdout
    log_file = open("log.txt", "w")
    sys.stdout = log_file
    print(test_dir)
    image_size = 96   # must match the input size used by face-expression-model.meta
    num_channels = 3
    images = []
    # Reading the image using OpenCV
    image = cv2.imread(test_dir)
    print(image)
    # Resizing the image to our desired size; preprocessing is done exactly as during training
    image = cv2.resize(image, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
    images.append(image)
    images = np.array(images, dtype=np.uint8)
    images = images.astype('float32')
    images = np.multiply(images, 1.0 / 255.0)
    # The input to the network is of shape [None, image_size, image_size, num_channels]. Hence we reshape.
    x_batch = images.reshape(1, image_size, image_size, num_channels)
    ## Let us restore the saved model
    sess = tf.Session()
    # Step 1: Recreate the network graph. At this step only the graph is created.
    saver = tf.train.import_meta_graph('face-expression-model.meta')
    # Step 2: Load the saved weights with the restore method.
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    # Access the default graph which we have restored
    graph = tf.get_default_graph()
    # Get hold of the op that is processed to obtain the output.
    # In the original network y_pred is the tensor holding the prediction of the network
    y_pred = graph.get_tensor_by_name('y_pred:0')
    ## Feed the images to the input placeholders
    x = graph.get_tensor_by_name('x:0')
    y_true = graph.get_tensor_by_name("y_true:0")
    y_test_images = np.zeros((1, len(os.listdir('training_data'))))
    ### Create the feed_dict that is required to calculate y_pred
    feed_dict_testing = {x: x_batch, y_true: y_test_images}
    result = sess.run(y_pred, feed_dict=feed_dict_testing)
    # result holds one probability per expression class, printed like [[p1 p2 ...]]
    print(result)
    #print(feed_dict_testing)
    log_file.close()
    sys.stdout = stdout_backup
    sess.close()
The result is thus written to log.txt.
2.2 Code for reading the result values from log.txt:
#include "stdafx.h"
#include "string.h"
#include "stdlib.h"
#include
int main(){
FILE *fp;
fp = fopen("data.txt","r");
float x;
char a[100] = "0";
char str1[100] = "0";
char ch;
int i,j,len;
double b[100] = {0};
for (i =j= 0; (ch = fgetc(fp)) != EOF; i++)
{
if (ch=='['||ch==']')
{
continue;
}
a[j] = ch;
j++;
}
printf("%s\n", a);
len = strlen(a);
int n = 0;
for ( i=j = 0; i <= len; i++)
{
if (a[i] != ' ')
{
str1[j++] = a[i];
}
else
{
str1[j] = 0;
if (j>0)
{
b[n++] = atof(str1);
}
j = 0;
}
}
if (j>0)
{
str1[j] = 0;
b[n++] = atof(str1);
}
for ( i = 0; i { printf("%lf\n", b[i]); } return 0; }