A script to verify that training a model on a GPU is faster than on a CPU

import time
import tensorflow as tf
from tensorflow.keras import layers

# Build a reasonably large model
model = tf.keras.Sequential()
model.add(layers.Dense(1000, activation='relu', input_shape=(10000,)))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(500, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='softmax'))

# Generate random training data
x_train = tf.random.normal((10000, 10000))
# Random one-hot labels, so categorical_crossentropy is well defined
y_train = tf.one_hot(tf.random.uniform((10000,), maxval=10, dtype=tf.int32), depth=10)


# Train the model on the CPU
with tf.device("/CPU:0"):
    print("Training on CPU...")
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    start_time = time.time()
    model.fit(x_train, y_train, epochs=10, batch_size=64)
    end_time = time.time()
    print("Total time with CPU: {:.2f} seconds".format(end_time - start_time))


# Train the model on the GPU (note: this run reuses the weights just trained on the CPU)
with tf.device("/GPU:0"):
    print("Training on GPU...")
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    start_time = time.time()
    model.fit(x_train, y_train, epochs=10, batch_size=64)
    end_time = time.time()
    print("Total time with GPU: {:.2f} seconds".format(end_time - start_time))

Note: a suitable GPU driver and CUDA environment must already be installed and configured.
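Before running the benchmark, you can also confirm that TensorFlow actually sees the GPU; a quick sanity check using the standard tf.config and tf.test APIs:

import tensorflow as tf

# An empty list here means the "/GPU:0" run above will fail
# (or silently fall back to the CPU, depending on placement settings)
print("GPUs detected:", tf.config.list_physical_devices('GPU'))
print("Built with CUDA:", tf.test.is_built_with_cuda())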

My environment: TensorFlow 2.10.0; Python 3.10; GPU: RTX 3050; CUDA 11.7; cuDNN v8.5.0; NVIDIA-SMI 517.20.
