1. Limitations of Enabling GPUs
# Manually load the required NVIDIA modules at boot
# (the two entries below are the typical ones; adjust them if your driver setup differs)
sudo tee -a /etc/rc.modules <<EOMSG
/usr/bin/nvidia-smi
/usr/bin/nvidia-modprobe -u -c=0
EOMSG
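On RHEL/CentOS, /etc/rc.modules is only executed at boot when it is marked executable, and once the driver from the next section is installed you can confirm the modules and device nodes directly; a short sanity check:
sudo chmod +x /etc/rc.modules
# After the driver install below, the NVIDIA kernel modules and device nodes should exist
lsmod | grep nvidia
ls -l /dev/nvidia*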
2. Enabling GPUs
# Update the kernel and install the build prerequisites for the NVIDIA driver
sudo yum update -y
sudo reboot
sudo yum groupinstall -y "Development tools"
sudo yum install -y kernel-devel-`uname -r`
# Download the driver installer for your GPU from nvidia.com and record its version
wget http://us.download.nvidia.com/.../NVIDIA-Linux-x86_64-.run
export NVIDIA_DRIVER_VERSION=
chmod 755 ./NVIDIA-Linux-x86_64-$NVIDIA_DRIVER_VERSION.run
./NVIDIA-Linux-x86_64-$NVIDIA_DRIVER_VERSION.run -asq
# Verify that the driver installed correctly
/usr/bin/nvidia-smi
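If the installation succeeded, nvidia-smi should list every GPU attached to the host; the standard query flags below make the driver version and memory easy to confirm:
# List the GPUs detected by the driver
nvidia-smi -L
# Report the driver version and total memory per GPU
nvidia-smi --query-gpu=name,driver_version,memory.total --format=csv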
# Install and enable the nvidia-docker 1.0 plugin
wget https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker-1.0.1-1.x86_64.rpm
sudo yum install -y nvidia-docker-1.0.1-1.x86_64.rpm
sudo systemctl start nvidia-docker
sudo systemctl enable nvidia-docker
# Smoke test: run nvidia-smi inside a CUDA container through the wrapper
sudo nvidia-docker run --rm nvidia/cuda:9.1 nvidia-smi
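It is also worth confirming that the plugin service is healthy and has registered the driver volume it mounts into containers (the nvidia_driver volume is created by the 1.0 plugin after its first run):
sudo systemctl status nvidia-docker
sudo docker volume ls | grep nvidia_driver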
# Once running, the plugin exposes the host driver files under:
# /var/lib/nvidia-docker/volumes/nvidia_driver/$NVIDIA_DRIVER_VERSION/
# Verify that a plain docker run (without the nvidia-docker wrapper) can reach the GPU
# by mounting the device nodes and the driver volume directly
sudo docker run --net host \
--device=/dev/nvidiactl \
--device=/dev/nvidia-uvm \
--device=/dev/nvidia0 \
-v /var/lib/nvidia-docker/volumes/nvidia_driver/$NVIDIA_DRIVER_VERSION/:/usr/local/nvidia/ \
-it nvidia/cuda:9.1 \
/usr/local/nvidia/bin/nvidia-smi
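With the driver and container access verified, GPU support is switched on in CDSW's configuration before restarting the services. On an RPM-based deployment the entries live in /etc/cdsw/config/cdsw.conf; the parameter names below are the ones documented for CDSW 1.x GPU support, so verify them against your release and substitute your actual driver version for the placeholder:
# /etc/cdsw/config/cdsw.conf (GPU-related entries)
NVIDIA_GPU_ENABLE=true
NVIDIA_LIBRARY_PATH=/var/lib/nvidia-docker/volumes/nvidia_driver/<driver-version>/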
# Restart CDSW on the master host so the new configuration takes effect
cdsw restart
# On each worker host, re-register with the master after the configuration change
cdsw reset
cdsw join
# Confirm that all services come back up and the GPU hosts are reported as healthy
cdsw status
Next, extend the base CDSW engine image with the CUDA and cuDNN runtime libraries. The Dockerfile below (cuda.Dockerfile, referenced by the build command further down) mirrors the steps of NVIDIA's official CUDA 8.0 runtime images:
FROM docker.repository.cloudera.com/cdsw/engine:4
RUN NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \
    NVIDIA_GPGKEY_FPR=ae09fe4bbd223a84b2ccfce3f60f4b3d7fa2af80 && \
    apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub && \
    apt-key adv --export --no-emit-version -a $NVIDIA_GPGKEY_FPR | tail -n +5 > cudasign.pub && \
    echo "$NVIDIA_GPGKEY_SUM  cudasign.pub" | sha256sum -c --strict - && rm cudasign.pub && \
    echo "deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/cuda.list
ENV CUDA_VERSION 8.0.61
LABEL com.nvidia.cuda.version="${CUDA_VERSION}"
ENV CUDA_PKG_VERSION 8-0=$CUDA_VERSION-1
RUN apt-get update && apt-get install -y --no-install-recommends \
        cuda-nvrtc-$CUDA_PKG_VERSION \
        cuda-nvgraph-$CUDA_PKG_VERSION \
        cuda-cusolver-$CUDA_PKG_VERSION \
        cuda-cublas-8-0=8.0.61.2-1 \
        cuda-cufft-$CUDA_PKG_VERSION \
        cuda-curand-$CUDA_PKG_VERSION \
        cuda-cusparse-$CUDA_PKG_VERSION \
        cuda-npp-$CUDA_PKG_VERSION \
        cuda-cudart-$CUDA_PKG_VERSION && \
    ln -s cuda-8.0 /usr/local/cuda && \
    rm -rf /var/lib/apt/lists/*
RUN echo "/usr/local/cuda/lib64" >> /etc/ld.so.conf.d/cuda.conf && \
ldconfig
RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \
echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
RUN echo "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
ENV CUDNN_VERSION 6.0.21
LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}"
RUN apt-get update && apt-get install -y --no-install-recommends \
        libcudnn6=$CUDNN_VERSION-1+cuda8.0 && \
    rm -rf /var/lib/apt/lists/*
# Build the image and push it to your own Docker registry (shown here as <registry>)
docker build --network host -t <registry>/cdsw-cuda:2 . -f cuda.Dockerfile
docker push <registry>/cdsw-cuda:2
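After the push, confirm that the CDSW GPU hosts can pull the image from the registry (using the same <registry> placeholder as above):
sudo docker pull <registry>/cdsw-cuda:2
sudo docker images | grep cdsw-cuda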
3. TensorFlow Example
# Install TensorFlow with GPU support inside the CDSW session
!pip install tensorflow-gpu    # Python 2 engine
!pip3 install tensorflow-gpu   # Python 3 engine
import tensorflow as tf
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the operation.
print(sess.run(c))
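With log_device_placement enabled, the session log should show the matmul being assigned to a GPU device. To pin the operation to a specific GPU explicitly, the same example can be wrapped in a tf.device context; a minimal sketch using the TensorFlow 1.x API shown above:
import tensorflow as tf

# Build the graph on the first GPU; allow_soft_placement falls back to CPU if it is unavailable
with tf.device('/gpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=True))
print(sess.run(c))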
# Prints a list of GPUs available
from tensorflow.python.client import device_lib
def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']

print(get_available_gpus())
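TensorFlow 1.x also provides direct checks that do not require enumerating devices yourself; a short sketch:
import tensorflow as tf

# True when at least one GPU is registered with the TensorFlow runtime
print(tf.test.is_gpu_available())
# Name of the default GPU device, or an empty string when none is found
print(tf.test.gpu_device_name())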