# modified from https://github.com/xfbs/docker-openpcdet
FROM nvidia/cuda:11.0.3-cudnn8-devel-ubuntu18.04
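# Re-import NVIDIA's rotated CUDA repository signing key so apt update does not fail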
RUN gpg --keyserver keyserver.ubuntu.com --recv A4B469963BF863CC
RUN gpg --export --armor A4B469963BF863CC | apt-key add -
RUN apt update && apt-get install -y vim
RUN apt install -y python3.6 python3-pip apt-transport-https ca-certificates gnupg software-properties-common wget git ninja-build libboost-dev build-essential
RUN apt-get update && apt-get install -y ruby zlib1g zlib1g-dev libjpeg-dev
RUN pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
# Install CMake
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - > /etc/apt/trusted.gpg.d/kitware.gpg
RUN apt-add-repository 'deb https://apt.kitware.com/ubuntu/ bionic main'
RUN apt-get update && apt install -y cmake
# Install spconv
COPY spconv /code/spconv
WORKDIR /code/spconv
ENV SPCONV_FORCE_BUILD_CUDA=1
RUN git checkout v1.2.1
RUN pip3 install pccm
RUN python3 setup.py bdist_wheel
RUN pip3 install dist/*.whl
# Install LLVM 10
WORKDIR /code
RUN wget https://apt.llvm.org/llvm.sh && chmod +x llvm.sh && ./llvm.sh 10
# OpenPCDet dependencies fail to install unless LLVM 10 exists on the system
# and there is a llvm-config binary available, so we have to symlink it here.
RUN ln -s /usr/bin/llvm-config-10 /usr/bin/llvm-config
RUN pip3 install --upgrade pip
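# CUDA architectures to compile the extensions for (Maxwell through Ampere; 7.5+PTX also emits PTX for forward compatibility)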
ARG TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5+PTX 8.0"
# Install CenterPoint
COPY CenterPoint /code/CenterPoint-dev
WORKDIR /code/CenterPoint-dev
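# Make the plain `python` command resolve to python3 for build scripts that call it directly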
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 0
RUN pip3 install -r requirements.txt
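# Replace opencv-python with the headless build; the default wheel expects GUI libraries (libGL) that this image does not ship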
RUN pip3 uninstall opencv-python --yes
RUN pip3 install opencv-python-headless
RUN bash setup.sh
RUN chmod -R 777 /code
WORKDIR /code/CenterPoint-dev
ENV PYTHONPATH "${PYTHONPATH}:/code/CenterPoint-dev"
The Dockerfile lives in the CenterPoint root directory.
The spconv source also needs to be prepared outside, as a sibling directory of CenterPoint.
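The expected layout looks like this (only the parent directory's name is arbitrary; the build is run from inside CenterPoint/ with the parent directory as the build context):
parent_dir/
├── CenterPoint/
│   └── Dockerfile
└── spconv/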
docker build -t centerpoint:init -f Dockerfile ..
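After the build finishes, a quick sanity check is possible (a sketch, assuming nvidia-container-toolkit is set up on the host):
docker run --rm --gpus all centerpoint:init python3 -c "import torch, spconv; print(torch.__version__, torch.cuda.is_available())"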
Other useful Docker commands
# xxx is the container ID; the name after it is the repository for the new image
docker commit xxx autoware_fix_bug
# list the containers that are currently running
docker ps
# list all images; autoware_fix_bug should now appear
docker images
An image can also be removed (xxx = image ID):
docker rmi xxx
To enter a running container (xxx is the container ID):
docker exec -it xxx /tmp/entrypoint.sh
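# if the image has no such entrypoint script, attaching a plain shell also works (assuming bash exists in the image)
docker exec -it xxx /bin/bash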
sudo docker run -it 14.14.14.100:5000/pytorch/pytorch:1.1.0-cuda10-py36 /bin/bash
Here, 14.14.14.100:5000/pytorch/pytorch:1.1.0-cuda10-py36 is the image name plus its tag.
The -t flag tells Docker to allocate a pseudo-terminal and attach it to the container's standard input; -i keeps the container's standard input open.
Starting a container with docker run performs the following standard steps in the background (see the short example after the list):
1. Check whether the specified image exists locally; if not, pull it from the public registry
2. Create and start a container from the image
3. Allocate a filesystem and mount a readable-writable layer on top of the read-only image layers
4. Bridge a virtual interface into the container from the bridge interface configured on the host
5. Assign the container an IP address from the address pool
6. Run the application specified by the user
7. Terminate the container once the application exits
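A minimal illustration of this lifecycle (the image name is just an example):
docker run ubuntu:18.04 echo hello
# the container terminated in step 7 now shows up as Exited
docker ps -a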
A concrete example:
docker commit 2ec7cf215b52 new_pytorch:v1
2ec7cf215b52 is the container ID
new_pytorch is the name of the resulting image
v1 is the tag of the resulting image
# the first argument is the output file name, the second is the image to save
docker save -o /tmp/new_pytorch:v1.tar new_pytorch:v1
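# the archive can later be restored on another machine
docker load -i /tmp/new_pytorch:v1.tar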
# -v mounts a host directory into the container at the mapped path; xxxxxx is the image to run
docker run --gpus all -e NVIDIA_DRIVER_CAPABILITIES=compute,utility -e NVIDIA_VISIBLE_DEVICES=all -it -v /DataSet/nuscenes:/nuScenes xxxxxx
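To confirm the GPU is actually visible, a quick check run inside the container:
nvidia-smi
python3 -c "import torch; print(torch.cuda.is_available())"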
Install the Docker and Remote - Containers extensions in VS Code.
After installation, open a remote window from the button in the bottom-left corner.
Then choose Remote-Containers -> Attach to Running Container...
Running bash setup.sh may fail during compilation because the GPU architecture is too new.
Workaround:
export TORCH_CUDA_ARCH_LIST="8.0"
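To find the right value for the local GPU, query its compute capability; for example (8, 0) corresponds to "8.0":
python3 -c "import torch; print(torch.cuda.get_device_capability())"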
Note: switch apt to a domestic (Chinese) mirror first, otherwise downloads are extremely slow.
vim /etc/apt/sources.list
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ bionic-security main restricted universe multiverse
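After switching the sources, refresh the package lists:
apt-get update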