cublas。
官方指南:https://docs.nvidia.com/cuda/cublas/
头文件 cublas_v2.h
官方例子:https://github.com/NVIDIA/CUDALibrarySamples/
cuDNN。
cuDNN官方指南:https://docs.nvidia.com/deeplearning/cudnn/developer-guide/index.html
入门例子:http://www.goldsborough.me/cuda/ml/cudnn/c++/2017/10/01/14-37-23-convolutions_with_cudnn/ (API已过时,看个思路就好)
cuBLAS library uses column-major storage, and 1-based indexing.
C++ 对接 cuBLAS 时需要注意矩阵布局:C++ 的二维数组一般是 0-based indexing、row-major storage(注意这是两个独立的概念,0-based 并不等于 row-major)。而 cuBLAS 按 column-major 解释数据,所以把 row-major 矩阵交给 cuBLAS 之前通常要先转置。可以用 cublasSgeam 这个 API 对 m * n 的矩阵做转置。
cublasSgeam(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, in_addr, n, &beta, out_addr, m, out_addr, m)
// 这里取 alpha = 1、beta = 0,即 C = op(A),B 参数传 out_addr 只是占位(beta = 0 时 B 不参与计算)
但是,如果后续调用的 API 本身带 cublasOperation_t 参数(即可以在计算时指定对输入做转置),就不需要单独先做一次转置。
比如,可以直接调用matmul:
// 对 row-major 的 A(m*k)、B(k*n) 都传 CUBLAS_OP_T 的写法:
const int lda = k;
const int ldb = n;
const int ldc = m;
cublasGemmEx(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, k, &alpha, a_addr, type_a, lda, b_addr, type_b, ldb, &beta, c_addr, type_c, ldc, type_compute, CUBLAS_GEMM_DEFAULT_TENSOR_OP)
// 注意:这种写法算出的 C 按 column-major 存放(即 row-major 视角下是 C 的转置),后续还需要转回来。
// 更常用的写法:利用 (A*B)^T = B^T * A^T,把 A/B 在参数里的位置互换并交换 m/n,
// 这样算出的 C 直接按 row-major 存放,无需再转置:
const int lda = (trans_a == CUBLAS_OP_N) ? k : m;
const int ldb = (trans_b == CUBLAS_OP_N) ? n : k;
const int ldc = n;
cublasGemmEx(cublas_handle, trans_b, trans_a, n, m, k, &alpha, b_addr, type_b, ldb, a_addr, type_a, lda, &beta, c_addr, type_c, ldc, type_compute, CUBLAS_GEMM_DEFAULT_TENSOR_OP)
cudnnConvolutionForward API
// kernel_descriptor_ 和convolution_descriptor_ 可以重复使用,对于动态batchsize的算子,这俩永远固定的
CUDNN_CHECK(cudnnCreateFilterDescriptor(&kernel_descriptor_));
// 注意:cudnnSetFilter4dDescriptor 的维度参数顺序是 (k, c, h, w),k = 输出通道数,c = 输入通道数;
// 这里的 n、k 分别对应 API 文档里的 k、c,命名容易混淆,建议核对。
CUDNN_CHECK(cudnnSetFilter4dDescriptor(kernel_descriptor_, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NHWC, n, k, h, w));
CUDNN_CHECK(cudnnCreateConvolutionDescriptor(&convolution_descriptor_));
// 注意:深度学习框架里的"卷积"通常是互相关,对应 CUDNN_CROSS_CORRELATION;
// CUDNN_CONVOLUTION 会先把卷积核翻转,结果和常见框架不一致,按需选择。
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(convolution_descriptor_, pad_h, pad_w, u, v, dilation_h, dilation_w, CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT));
// 因为动态batchsize,只有runtime才能知道具体的dim信息,需要重新set descriptor
cudnnTensorDescriptor_t runtime_input_descriptor{nullptr};
cudnnTensorDescriptor_t runtime_output_descriptor{nullptr};
CUDNN_CHECK(cudnnCreateTensorDescriptor(&runtime_input_descriptor));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&runtime_output_descriptor));
CUDNN_CHECK(
cudnnSetTensor4dDescriptor(runtime_input_descriptor, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, n, c, h, w));
// 注意:输出的 n/c/h/w 一般不等于输入,应先用 cudnnGetConvolution2dForwardOutputDim 算出输出维度再 set;
// 这里直接复用输入的 n, c, h, w 只在输出 shape 恰好与输入相同时才正确。
CUDNN_CHECK(
cudnnSetTensor4dDescriptor(runtime_output_descriptor, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, n, c, h, w));
int returned_algo_count = 0;
// 1是因为只取最优的算法即可
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm_v7(cudnn_handle_, runtime_input_descriptor, kernel_descriptor_,
convolution_descriptor_, runtime_output_descriptor, 1,
&returned_algo_count, cudnn_algos_));
// cudnn_algos_[0].memory 最优算法需要的workspace大小(bytes)
// cudnn_algos_[0].algo 最优算法
// 申请workspace的内存;注意后面推理调用里传的指针和大小要与这里保持一致:
// workspace_bytes_ 应等于 cudnn_algos_[0].memory,current_workspace 即这里申请的 workspace_run_
CUDA_CHECK(cudaMalloc(&workspace_run_, cudnn_algos_[0].memory));
// 两个卷积推理api:
// 只卷积
CUDNN_CHECK(cudnnConvolutionForward(cudnn_handle_, &alpha, runtime_input_descriptor, inputs[0], kernel_descriptor_,
inputs[1], convolution_descriptor_, cudnn_algos_[0].algo, current_workspace,
workspace_bytes_, &beta, runtime_output_descriptor, outputs[0]));
// 卷积 + biasadd + activation(融合成一次调用)
// 注意:按 cuDNN 文档,当 activation 为 CUDNN_ACTIVATION_IDENTITY 时,
// algo 只能用 CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;其他 activation 也建议核对文档支持的 algo。
CUDNN_CHECK(cudnnConvolutionBiasActivationForward(
cudnn_handle_, &alpha, runtime_input_descriptor, inputs[0], kernel_descriptor_, inputs[1],
convolution_descriptor_, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, current_workspace, workspace_bytes_,
&beta, runtime_output_descriptor, outputs[0], bias_descriptor_, inputs[kBiasIndex], cudnn_activation_
/*activation*/, runtime_output_descriptor, outputs[0]));
// 注意,各种descriptor都需要destroy,cudnnDestroyTensorDescriptor,cudnnDestroyFilterDescriptor,cudnnDestroyConvolutionDescriptor