dlib is a toolkit written in C++. Unlike a typical deep-learning library, it mostly packages classic machine-learning algorithms such as regression, support vector machines, and clustering, works out of the box, and exposes both C++ and Python interfaces.
This article walks through a small C++ demo that calls dlib, to get a feel for this powerful library.
Download the latest source from the official dlib website or from the dlib GitHub repository.
This article uses dlib-19.17.
dlib builds on both Windows and Linux.
To avoid pulling in too many dependencies, extract the source, open dlib-19.17/dlib/CMakeLists.txt, and find the following lines:
if (DLIB_ISO_CPP_ONLY)
    option(DLIB_JPEG_SUPPORT ${DLIB_JPEG_SUPPORT_STR} OFF)
    option(DLIB_LINK_WITH_SQLITE3 ${DLIB_LINK_WITH_SQLITE3_STR} OFF)
    option(DLIB_USE_BLAS ${DLIB_USE_BLAS_STR} OFF)
    option(DLIB_USE_LAPACK ${DLIB_USE_LAPACK_STR} OFF)
    option(DLIB_USE_CUDA ${DLIB_USE_CUDA_STR} OFF)
    option(DLIB_PNG_SUPPORT ${DLIB_PNG_SUPPORT_STR} OFF)
    option(DLIB_GIF_SUPPORT ${DLIB_GIF_SUPPORT_STR} OFF)
    #option(DLIB_USE_FFTW ${DLIB_USE_FFTW_STR} OFF)
    option(DLIB_USE_MKL_FFT ${DLIB_USE_MKL_FFT_STR} OFF)
else()
    option(DLIB_JPEG_SUPPORT ${DLIB_JPEG_SUPPORT_STR} ON)
    option(DLIB_LINK_WITH_SQLITE3 ${DLIB_LINK_WITH_SQLITE3_STR} ON)
    option(DLIB_USE_BLAS ${DLIB_USE_BLAS_STR} ON)
    option(DLIB_USE_LAPACK ${DLIB_USE_LAPACK_STR} ON)
    option(DLIB_USE_CUDA ${DLIB_USE_CUDA_STR} ON)
    option(DLIB_PNG_SUPPORT ${DLIB_PNG_SUPPORT_STR} ON)
    option(DLIB_GIF_SUPPORT ${DLIB_GIF_SUPPORT_STR} ON)
    #option(DLIB_USE_FFTW ${DLIB_USE_FFTW_STR} ON)
    option(DLIB_USE_MKL_FFT ${DLIB_USE_MKL_FFT_STR} ON)
endif()
Change them to:
if (DLIB_ISO_CPP_ONLY)
    option(DLIB_JPEG_SUPPORT ${DLIB_JPEG_SUPPORT_STR} OFF)
    option(DLIB_LINK_WITH_SQLITE3 ${DLIB_LINK_WITH_SQLITE3_STR} OFF)
    option(DLIB_USE_BLAS ${DLIB_USE_BLAS_STR} OFF)
    option(DLIB_USE_LAPACK ${DLIB_USE_LAPACK_STR} OFF)
    option(DLIB_USE_CUDA ${DLIB_USE_CUDA_STR} OFF)
    option(DLIB_PNG_SUPPORT ${DLIB_PNG_SUPPORT_STR} OFF)
    option(DLIB_GIF_SUPPORT ${DLIB_GIF_SUPPORT_STR} OFF)
    #option(DLIB_USE_FFTW ${DLIB_USE_FFTW_STR} OFF)
    option(DLIB_USE_MKL_FFT ${DLIB_USE_MKL_FFT_STR} OFF)
else()
    option(DLIB_JPEG_SUPPORT ${DLIB_JPEG_SUPPORT_STR} ON)
    option(DLIB_LINK_WITH_SQLITE3 ${DLIB_LINK_WITH_SQLITE3_STR} ON)
    option(DLIB_USE_BLAS ${DLIB_USE_BLAS_STR} OFF)
    option(DLIB_USE_LAPACK ${DLIB_USE_LAPACK_STR} OFF)
    option(DLIB_USE_CUDA ${DLIB_USE_CUDA_STR} OFF)
    option(DLIB_PNG_SUPPORT ${DLIB_PNG_SUPPORT_STR} ON)
    option(DLIB_GIF_SUPPORT ${DLIB_GIF_SUPPORT_STR} ON)
    #option(DLIB_USE_FFTW ${DLIB_USE_FFTW_STR} ON)
    option(DLIB_USE_MKL_FFT ${DLIB_USE_MKL_FFT_STR} OFF)
endif()
This explicitly disables the BLAS, LAPACK, CUDA, and MKL dependencies.
Also, make sure your compiler supports the full C++11 feature set.
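Alternatively, since these switches are ordinary CMake cache options, you can presumably turn them off from the configure command line instead of editing the file; a sketch, not verified against this exact dlib version:

cmake -DDLIB_USE_BLAS=OFF -DDLIB_USE_LAPACK=OFF -DDLIB_USE_CUDA=OFF -DDLIB_USE_MKL_FFT=OFF ..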
1. Build the static library with CMake (Windows)
Use a Release build; it is much faster for numerical computation.
On Windows, the commands from the official site are recommended; the CMake GUI also works:
cd dlib-19.17
mkdir build_x64
cd build_x64
cmake -G "Visual Studio 14 2015 Win64" -T host=x64 ..
cmake --build . --config Release
This produces the static library dlib19.17.0_release_64bit_msvc1900.lib under build_x64/dlib/Release; rename it to dlib.lib so the linker can find it under the plain name dlib.
2. Replace config.h
This step is essential: it prevents the USER_ERROR__inconsistent_build_configuration__see_dlib_faq_2 error when you later compile against dlib. The generated config.h records the options the library was actually built with, and dlib refuses to compile if the headers you include disagree with them.
Copy build_x64/dlib/config.h over the one in the source tree at dlib-19.17/dlib.
1. Build the static library with CMake (Linux)
Again, use a Release build for speed.
cd dlib-19.17
mkdir build
cd build
cmake ..
cmake --build . --config Release
This produces the static library libdlib.a under build/dlib (with the Makefile generator there is no Release subdirectory).
2. Replace config.h
As on Windows, this step is essential; it avoids the USER_ERROR__inconsistent_build_configuration__see_dlib_faq_2 error when calling dlib.
Copy build/dlib/config.h over the one in the source tree at dlib-19.17/dlib.
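As a quick sanity check before creating the demo project, you can likely link a small test file directly against the freshly built library; a sketch, assuming the paths used in this article and a test file main.cpp (dlib uses threads, so -lpthread is needed on Linux):

g++ -std=c++11 -O2 -I /home/user/codetest/dlib-19.17 main.cpp /home/user/codetest/dlib-19.17/build/dlib/libdlib.a -lpthread -o test_dlib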
Now create the demo project, using CMake so it builds on both platforms.
Directory layout:
dlib_test
├── CMakeLists.txt
└── src
└── main.cpp
The two files are as follows.
CMakeLists.txt (note that the demo itself should also be compiled and run as a 64-bit Release build):
cmake_minimum_required(VERSION 2.8)
project(dlib_test)

add_definitions(-std=c++11)

# Path to the dlib source tree (adjust to your own location).
if (UNIX)
    include_directories(/home/user/codetest/dlib-19.17)
else()
    include_directories(D:/Programs/dlib-19.17)
endif()

aux_source_directory(./src DIR_SRCS)

# Path to the static library built earlier.
if (UNIX)
    link_directories(/home/user/codetest/dlib-19.17/build/dlib)
else()
    link_directories(D:/Programs/dlib-19.17/build_x64/dlib/Release)
endif()

add_executable(dlib_test ${DIR_SRCS})
target_link_libraries(dlib_test dlib)
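For reference, the demo is configured and built the same way as dlib itself; the Windows generator string mirrors the one used earlier (adjust it to your Visual Studio version):

cd dlib_test
mkdir build
cd build
# Linux:
cmake ..
# Windows (same 64-bit generator as before):
# cmake -G "Visual Studio 14 2015 Win64" -T host=x64 ..
cmake --build . --config Release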
main.cpp (a simple SVM example, adapted from svm_c_ex.cpp in the official dlib examples):
#include <iostream>
#include "dlib/svm.h"

using namespace std;
using namespace dlib;

int main()
{
    typedef matrix<double, 2, 1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    // Now we make objects to contain our samples and their respective labels.
    std::vector<sample_type> samples;
    std::vector<double> labels;

    // Now let's put some data into our samples and labels objects. We do this
    // by looping over a bunch of points and labeling them according to their
    // distance from the origin.
    for (int r = -20; r <= 20; ++r)
    {
        for (int c = -20; c <= 20; ++c)
        {
            sample_type samp;
            samp(0) = r;
            samp(1) = c;
            samples.push_back(samp);

            // if this point is less than 10 from the origin
            if (sqrt((double)r*r + c*c) <= 10)
                labels.push_back(+1);
            else
                labels.push_back(-1);
        }
    }

    vector_normalizer<sample_type> normalizer;
    // Let the normalizer learn the mean and standard deviation of the samples.
    normalizer.train(samples);
    // now normalize each sample
    for (unsigned long i = 0; i < samples.size(); ++i)
        samples[i] = normalizer(samples[i]);

    randomize_samples(samples, labels);

    // here we make an instance of the svm_c_trainer object that uses our kernel
    // type.
    svm_c_trainer<kernel_type> trainer;

    cout << "doing cross validation" << endl;
    for (double gamma = 0.00001; gamma <= 1; gamma *= 5)
    {
        for (double C = 1; C < 100000; C *= 5)
        {
            // tell the trainer the parameters we want to use
            trainer.set_kernel(kernel_type(gamma));
            trainer.set_c(C);

            cout << "gamma: " << gamma << " C: " << C;
            // Print out the cross validation accuracy for 3-fold cross validation using
            // the current gamma and C. cross_validate_trainer() returns a row vector.
            // The first element of the vector is the fraction of +1 training examples
            // correctly classified and the second number is the fraction of -1 training
            // examples correctly classified.
            cout << " cross validation accuracy: "
                 << cross_validate_trainer(trainer, samples, labels, 3);
        }
    }

    trainer.set_kernel(kernel_type(0.15625));
    trainer.set_c(5);
    typedef decision_function<kernel_type> dec_funct_type;
    typedef normalized_function<dec_funct_type> funct_type;

    // Here we are making an instance of the normalized_function object. This
    // object provides a convenient way to store the vector normalization
    // information along with the decision function we are going to learn.
    funct_type learned_function;
    learned_function.normalizer = normalizer; // save normalization information
    learned_function.function = trainer.train(samples, labels); // perform the actual SVM training and save the results

    // print out the number of support vectors in the resulting decision function
    cout << "\nnumber of support vectors in our learned_function is "
         << learned_function.function.basis_vectors.size() << endl;

    // Now let's try this decision_function on some samples we haven't seen before.
    sample_type sample;

    sample(0) = 3.123;
    sample(1) = 2;
    cout << "This is a +1 class example, the classifier output is " << learned_function(sample) << endl;

    sample(0) = 3.123;
    sample(1) = 9.3545;
    cout << "This is a +1 class example, the classifier output is " << learned_function(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 9.3545;
    cout << "This is a -1 class example, the classifier output is " << learned_function(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 0;
    cout << "This is a -1 class example, the classifier output is " << learned_function(sample) << endl;

    // We can also train a decision function that reports a well conditioned
    // probability instead of just a number > 0 for the +1 class and < 0 for the
    // -1 class. An example of doing that follows:
    typedef probabilistic_decision_function<kernel_type> probabilistic_funct_type;
    typedef normalized_function<probabilistic_funct_type> pfunct_type;

    pfunct_type learned_pfunct;
    learned_pfunct.normalizer = normalizer;
    learned_pfunct.function = train_probabilistic_decision_function(trainer, samples, labels, 3);
    // Now we have a function that returns the probability that a given sample is of the +1 class.

    // print out the number of support vectors in the resulting decision function.
    // (it should be the same as in the one above)
    cout << "\nnumber of support vectors in our learned_pfunct is "
         << learned_pfunct.function.decision_funct.basis_vectors.size() << endl;

    sample(0) = 3.123;
    sample(1) = 2;
    cout << "This +1 class example should have high probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    sample(0) = 3.123;
    sample(1) = 9.3545;
    cout << "This +1 class example should have high probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 9.3545;
    cout << "This -1 class example should have low probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    sample(0) = 13.123;
    sample(1) = 0;
    cout << "This -1 class example should have low probability. Its probability is: "
         << learned_pfunct(sample) << endl;

    serialize("saved_function.dat") << learned_pfunct;

    // Now let's open that file back up and load the function object it contains.
    deserialize("saved_function.dat") >> learned_pfunct;

    cout << "\ncross validation accuracy with only 10 support vectors: "
         << cross_validate_trainer(reduced2(trainer, 10), samples, labels, 3);

    // Let's print out the original cross validation score too for comparison.
    cout << "cross validation accuracy with all the original support vectors: "
         << cross_validate_trainer(trainer, samples, labels, 3);

    // When you run this program you should see that, for this problem, you can
    // reduce the number of basis vectors down to 10 without hurting the cross
    // validation accuracy.

    // To get the reduced decision function out we would just do this:
    learned_function.function = reduced2(trainer, 10).train(samples, labels);
    // And similarly for the probabilistic_decision_function:
    learned_pfunct.function = train_probabilistic_decision_function(reduced2(trainer, 10), samples, labels, 3);

    return 0;
}
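A quick note on the gamma/C grid search above: dlib's radial_basis_kernel computes

K(a, b) = \exp\left(-\gamma \,\lVert a - b \rVert^{2}\right)

so larger gamma makes the kernel more local, while C is the usual soft-margin penalty of the C-SVM: larger values fit the training data more tightly at the risk of overfitting. The values gamma = 0.15625 and C = 5 hard-coded after the loop are simply a well-performing cell from this grid, as the cross-validation output below confirms.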
Building and running the demo produces the following output:
doing cross validation
gamma: 1e-05 C: 1 cross validation accuracy: 0 1
gamma: 1e-05 C: 5 cross validation accuracy: 0 1
gamma: 1e-05 C: 25 cross validation accuracy: 0 1
gamma: 1e-05 C: 125 cross validation accuracy: 0 1
gamma: 1e-05 C: 625 cross validation accuracy: 0 1
gamma: 1e-05 C: 3125 cross validation accuracy: 0 1
gamma: 1e-05 C: 15625 cross validation accuracy: 0 1
gamma: 1e-05 C: 78125 cross validation accuracy: 0 1
gamma: 5e-05 C: 1 cross validation accuracy: 0 1
gamma: 5e-05 C: 5 cross validation accuracy: 0 1
gamma: 5e-05 C: 25 cross validation accuracy: 0 1
gamma: 5e-05 C: 125 cross validation accuracy: 0 1
gamma: 5e-05 C: 625 cross validation accuracy: 0 1
gamma: 5e-05 C: 3125 cross validation accuracy: 0 1
gamma: 5e-05 C: 15625 cross validation accuracy: 0 1
gamma: 5e-05 C: 78125 cross validation accuracy: 0 1
gamma: 0.00025 C: 1 cross validation accuracy: 0 1
gamma: 0.00025 C: 5 cross validation accuracy: 0 1
gamma: 0.00025 C: 25 cross validation accuracy: 0 1
gamma: 0.00025 C: 125 cross validation accuracy: 0 1
gamma: 0.00025 C: 625 cross validation accuracy: 0 1
gamma: 0.00025 C: 3125 cross validation accuracy: 0 1
gamma: 0.00025 C: 15625 cross validation accuracy: 0 1
gamma: 0.00025 C: 78125 cross validation accuracy: 0.990476 0.991189
gamma: 0.00125 C: 1 cross validation accuracy: 0 1
gamma: 0.00125 C: 5 cross validation accuracy: 0 1
gamma: 0.00125 C: 25 cross validation accuracy: 0 1
gamma: 0.00125 C: 125 cross validation accuracy: 0 1
gamma: 0.00125 C: 625 cross validation accuracy: 0 1
gamma: 0.00125 C: 3125 cross validation accuracy: 0.980952 0.994126
gamma: 0.00125 C: 15625 cross validation accuracy: 0.980952 0.991924
gamma: 0.00125 C: 78125 cross validation accuracy: 0.984127 0.99486
gamma: 0.00625 C: 1 cross validation accuracy: 0 1
gamma: 0.00625 C: 5 cross validation accuracy: 0 1
gamma: 0.00625 C: 25 cross validation accuracy: 0 1
gamma: 0.00625 C: 125 cross validation accuracy: 0.980952 0.99486
gamma: 0.00625 C: 625 cross validation accuracy: 0.980952 0.991924
gamma: 0.00625 C: 3125 cross validation accuracy: 0.980952 0.995595
gamma: 0.00625 C: 15625 cross validation accuracy: 0.987302 0.994126
gamma: 0.00625 C: 78125 cross validation accuracy: 0.990476 0.99486
gamma: 0.03125 C: 1 cross validation accuracy: 0 1
gamma: 0.03125 C: 5 cross validation accuracy: 0.971429 0.996329
gamma: 0.03125 C: 25 cross validation accuracy: 0.974603 0.992658
gamma: 0.03125 C: 125 cross validation accuracy: 0.980952 0.996329
gamma: 0.03125 C: 625 cross validation accuracy: 0.987302 0.99486
gamma: 0.03125 C: 3125 cross validation accuracy: 0.990476 0.99486
gamma: 0.03125 C: 15625 cross validation accuracy: 0.95873 0.995595
gamma: 0.03125 C: 78125 cross validation accuracy: 0.996825 0.995595
gamma: 0.15625 C: 1 cross validation accuracy: 0.952381 0.998532
gamma: 0.15625 C: 5 cross validation accuracy: 0.993651 0.996329
gamma: 0.15625 C: 25 cross validation accuracy: 0.990476 0.995595
gamma: 0.15625 C: 125 cross validation accuracy: 0.980952 0.99486
gamma: 0.15625 C: 625 cross validation accuracy: 0.949206 0.997797
gamma: 0.15625 C: 3125 cross validation accuracy: 0.993651 0.998532
gamma: 0.15625 C: 15625 cross validation accuracy: 0.987302 1
gamma: 0.15625 C: 78125 cross validation accuracy: 0.990476 0.997797
gamma: 0.78125 C: 1 cross validation accuracy: 0.952381 0.997797
gamma: 0.78125 C: 5 cross validation accuracy: 0.974603 0.997797
gamma: 0.78125 C: 25 cross validation accuracy: 0.974603 1
gamma: 0.78125 C: 125 cross validation accuracy: 0.984127 1
gamma: 0.78125 C: 625 cross validation accuracy: 0.987302 1
gamma: 0.78125 C: 3125 cross validation accuracy: 0.987302 1
gamma: 0.78125 C: 15625 cross validation accuracy: 0.987302 0.997797
gamma: 0.78125 C: 78125 cross validation accuracy: 0.980952 0.998532
number of support vectors in our learned_function is 209
This is a +1 class example, the classifier output is 2.71477
This is a +1 class example, the classifier output is -0.0102314
This is a -1 class example, the classifier output is -4.36211
This is a -1 class example, the classifier output is -2.16552
number of support vectors in our learned_pfunct is 209
This +1 class example should have high probability. Its probability is: 1
This +1 class example should have high probability. Its probability is: 0.465781
This -1 class example should have low probability. Its probability is: 3.05246e-11
This -1 class example should have low probability. Its probability is: 5.78323e-06
cross validation accuracy with only 10 support vectors: 0.993651 0.99486
cross validation accuracy with all the original support vectors: 0.993651 0.996329
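In a real application you would typically train once and then load saved_function.dat from a separate program. A minimal sketch of such a loader (the file name load_model.cpp is hypothetical; the typedefs must match the ones used when serializing):

// load_model.cpp -- loads the probabilistic decision function saved by the demo above.
#include <iostream>
#include "dlib/svm.h"

using namespace dlib;

int main()
{
    typedef matrix<double, 2, 1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;
    typedef probabilistic_decision_function<kernel_type> probabilistic_funct_type;
    typedef normalized_function<probabilistic_funct_type> pfunct_type;

    // Deserialize into the same type that was serialized by the demo.
    pfunct_type learned_pfunct;
    deserialize("saved_function.dat") >> learned_pfunct;

    sample_type sample;
    sample(0) = 3.123;
    sample(1) = 2;
    std::cout << "probability of +1: " << learned_pfunct(sample) << std::endl;
    return 0;
}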