Note: this article builds NCCL from source rather than installing NVIDIA's precompiled packages; the following link is listed for reference only:
NVIDIA Collective Communications Library (NCCL) | NVIDIA Developer
Here is NVIDIA's open-source NCCL repository; it is fully functional, so there is no need for any concern about using it:
GitHub - NVIDIA/nccl: Optimized primitives for collective multi-GPU communication
Here is the official tutorial; the example in this article is adapted from one of the examples it contains:
Using NCCL — NCCL 2.19.3 documentation
git clone --recursive https://github.com/NVIDIA/nccl.git
cd nccl
make -j src.build
Alternatively, to save compile time and disk space, you can build for a specific GPU architecture only; taking sm_70 as an example:
make -j src.build NVCC_GENCODE="-gencode=arch=compute_70,code=sm_70"
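The same pattern works for other architectures; for instance, for compute capability 8.0 (e.g. an A100) the line would be:
make -j src.build NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80"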
Install the tools needed to build deb packages:
sudo apt install build-essential devscripts debhelper fakeroot
Build the packages and list them:
make pkg.debian.build
ls build/pkg/deb/
Note that the deb filenames include the CUDA version number; go by the names of the packages you actually generated:
sudo dpkg -i build/pkg/deb/libnccl2_2.19.4-1+cuda12.1_amd64.deb
sudo dpkg -i build/pkg/deb/libnccl-dev_2.19.4-1+cuda12.1_amd64.deb
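To sanity-check the install, here is a minimal sketch that prints the NCCL version reported by the library (the filename is illustrative, and the paths assume a default CUDA install under /usr/local/cuda):

// nccl_version.cpp -- print the version of the NCCL library we linked against
#include <stdio.h>
#include <nccl.h>

int main()
{
    int version = 0;
    if (ncclGetVersion(&version) != ncclSuccess) {
        printf("ncclGetVersion failed\n");
        return 1;
    }
    printf("NCCL version: %d\n", version);  // e.g. 21904 for NCCL 2.19.4
    return 0;
}

Build and run:
g++ nccl_version.cpp -I/usr/local/cuda/include -L/usr/local/cuda/lib64 -lnccl -lcudart -o nccl_version && ./nccl_version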
This is a single-node, single-thread, multi-GPU example. This article uses two GPUs: a single process drives both GPUs in a loop to perform an allreduce over four float vectors, S0, S1, R0, R1, with the following mathematical effect:
R0 = S0 + S1, elementwise: R0[i] = S0[i] + S1[i]
R1 = S0 + S1, elementwise: R1[i] = S0[i] + S1[i]
In the corresponding code:
sendbuff[0] is a vector in GPU 0's device memory,
sendbuff[1] is a vector in GPU 1's device memory,
and the mathematical effect achieved, with both results in device memory, is:
recvbuff[0] = sendbuff[0] + sendbuff[1];
recvbuff[1] = sendbuff[0] + sendbuff[1];
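This is exactly what ncclAllReduce with the ncclSum reduction computes; for reference, its declaration in nccl.h is:

ncclResult_t ncclAllReduce(const void* sendbuff, void* recvbuff, size_t count,
                           ncclDataType_t datatype, ncclRedOp_t op,
                           ncclComm_t comm, cudaStream_t stream);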
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "nccl.h"
#include <sys/time.h>
#include <math.h>
#define CUDACHECK(cmd) do {                                  \
    cudaError_t err = cmd;                                   \
    if (err != cudaSuccess) {                                \
        printf("Failed: Cuda error %s:%d '%s'\n",            \
               __FILE__, __LINE__, cudaGetErrorString(err)); \
        exit(EXIT_FAILURE);                                  \
    }                                                        \
} while(0)

#define NCCLCHECK(cmd) do {                                  \
    ncclResult_t res = cmd;                                  \
    if (res != ncclSuccess) {                                \
        printf("Failed, NCCL error %s:%d '%s'\n",            \
               __FILE__, __LINE__, ncclGetErrorString(res)); \
        exit(EXIT_FAILURE);                                  \
    }                                                        \
} while(0)
void get_seed(long long &seed)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    seed = (long long)tv.tv_sec * 1000 * 1000 + tv.tv_usec;  // seconds and microseconds combined
    printf("useconds:%lld\n", seed);
}
void init_vector(float* A, int n)
{
    long long seed = 0;
    get_seed(seed);
    srand(seed);
    // fill with random values in [0, 1); the original loop body was
    // truncated, so this is a reasonable reconstruction
    for (int i = 0; i < n; i++)
        A[i] = (rand() % 100) / 100.0f;
}
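The rest of the program is a minimal sketch following the single-process, multi-device example in the official NCCL documentation (the vector length and the host-side verification step are my own additions):

int main()
{
    const int nDev = 2;                 // two GPUs, as described above
    const size_t count = 1024;          // vector length, arbitrary for this demo
    int devs[2] = {0, 1};

    ncclComm_t comms[2];
    float* sendbuff[2];
    float* recvbuff[2];
    cudaStream_t streams[2];
    float* h_send[2];
    float* h_recv = (float*)malloc(count * sizeof(float));

    // allocate and initialize per-device buffers: S0 on GPU 0, S1 on GPU 1
    for (int i = 0; i < nDev; ++i) {
        h_send[i] = (float*)malloc(count * sizeof(float));
        init_vector(h_send[i], count);
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaMalloc((void**)&sendbuff[i], count * sizeof(float)));
        CUDACHECK(cudaMalloc((void**)&recvbuff[i], count * sizeof(float)));
        CUDACHECK(cudaMemcpy(sendbuff[i], h_send[i], count * sizeof(float),
                             cudaMemcpyHostToDevice));
        CUDACHECK(cudaStreamCreate(&streams[i]));
    }

    // one communicator per device, all owned by this single thread
    NCCLCHECK(ncclCommInitAll(comms, nDev, devs));

    // group the per-device calls so NCCL launches them as one collective
    NCCLCHECK(ncclGroupStart());
    for (int i = 0; i < nDev; ++i)
        NCCLCHECK(ncclAllReduce(sendbuff[i], recvbuff[i], count,
                                ncclFloat, ncclSum, comms[i], streams[i]));
    NCCLCHECK(ncclGroupEnd());

    // wait for the collective to finish on every device
    for (int i = 0; i < nDev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaStreamSynchronize(streams[i]));
    }

    // verify on the host that R0[i] == S0[i] + S1[i]
    CUDACHECK(cudaSetDevice(0));
    CUDACHECK(cudaMemcpy(h_recv, recvbuff[0], count * sizeof(float),
                         cudaMemcpyDeviceToHost));
    for (size_t i = 0; i < count; ++i) {
        float expect = h_send[0][i] + h_send[1][i];
        if (fabsf(h_recv[i] - expect) > 1e-5f) {
            printf("mismatch at %zu: %f vs %f\n", i, h_recv[i], expect);
            exit(EXIT_FAILURE);
        }
    }
    printf("allreduce result verified\n");

    // release device and host resources
    for (int i = 0; i < nDev; ++i) {
        CUDACHECK(cudaSetDevice(i));
        CUDACHECK(cudaFree(sendbuff[i]));
        CUDACHECK(cudaFree(recvbuff[i]));
        CUDACHECK(cudaStreamDestroy(streams[i]));
        free(h_send[i]);
        ncclCommDestroy(comms[i]);
    }
    free(h_recv);
    return 0;
}

Note the ncclGroupStart/ncclGroupEnd pair: because a single thread issues NCCL calls for several devices, the calls must be grouped so NCCL launches them as one collective instead of deadlocking.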
To compile it, refer to the following rule in the Makefile:
single_thread_allreduce: single_thread_allreduce.cpp
	g++ -g $< -o $@ $(LD_FLAGS)
Makefile:
LD_FLAGS := -lnccl -L/usr/local/cuda/lib64 -lcudart -I/usr/local/cuda/include
MPI_FLAGS := -I /usr/lib/x86_64-linux-gnu/openmpi/include -L /usr/lib/x86_64-linux-gnu/openmpi/lib -lmpi -lmpi_cxx

EXE := single_thread_allreduce oneServer_multiDevice_multiThread mpi_test

all: $(EXE)

single_thread_allreduce: single_thread_allreduce.cpp
	g++ -g $< -o $@ $(LD_FLAGS)

oneServer_multiDevice_multiThread: oneServer_multiDevice_multiThread.cpp
	g++ -g $< -o $@ $(LD_FLAGS) $(MPI_FLAGS)

mpi_test: mpi_test.cpp
	g++ -g $< -o $@ $(LD_FLAGS) $(MPI_FLAGS)

.PHONY: clean
clean:
	-rm $(EXE)
This example does not use MPI, so it can be compiled and run directly:
make && ./single_thread_allreduce
A few notes on the run (the screenshot of its output is not reproduced here). It achieves the mathematical targets:
R0 = S0 + S1, elementwise: R0[i] = S0[i] + S1[i]
R1 = S0 + S1, elementwise: R1[i] = S0[i] + S1[i]
This example is the single-node, single-thread, multi-GPU case.