Training a Simple Neural Network with TensorFlow

Here we build the model with TensorFlow's Eager Execution, so there is no need to create a Graph and a Session as before; this makes training a neural network quicker and more convenient. Below we train a neural network on the Iris dataset as an example; the code is adapted from Google's tutorial.

# Import the required libraries
from __future__ import absolute_import, division, print_function
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()  # turn on eager execution
# Print the version and check that eager execution is enabled
print('TensorFlow Version:{}'.format(tf.VERSION))
print('Eager execution:{}'.format(tf.executing_eagerly()))

TensorFlow Version:1.8.0
Eager execution:True
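
To see what eager execution buys us, here is a minimal sketch (assuming the imports and the tf.enable_eager_execution() call above): operations run immediately and return concrete values, with no Graph or Session involved.

x = tf.constant([[1., 2.], [3., 4.]])
print(tf.matmul(x, x))   # the result is computed right away and can be printed directly
print(x.numpy())         # eager tensors convert to NumPy arrays on demand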

# Download the dataset and print where it is saved locally
train_dataset_url='http://download.tensorflow.org/data/iris_training.csv'
train_dataset_fp=tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),origin=train_dataset_url)
print('Local copy of the dataset file:{}'.format(train_dataset_fp))

Downloading data from http://download.tensorflow.org/data/iris_training.csv
8192/2194 [================================================================================================================] - 0s 0us/step
Local copy of the dataset file:C:\Users\Frank\.keras\datasets\iris_training.csv

# Parse one CSV row: each row has 5 fields, the first 4 are features and the last is the label
def parse_csv(line):
    example_defaults = [[0.], [0.], [0.], [0.], [0]]   # field types: four floats, one int
    parsed_line = tf.decode_csv(line, example_defaults)
    features = tf.reshape(parsed_line[:-1], shape=(4,))   # first 4 fields -> feature vector
    label = tf.reshape(parsed_line[-1], shape=())          # last field -> scalar label
    return features, label
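
As a quick check of the parser (using a hypothetical CSV row, not necessarily one from the downloaded file), calling parse_csv on a single string tensor should yield a float32 feature vector of shape (4,) and an int32 scalar label:

f, l = parse_csv(tf.constant('6.4,2.8,5.6,2.2,2'))
print(f)   # tf.Tensor([6.4 2.8 5.6 2.2], shape=(4,), dtype=float32)
print(l)   # tf.Tensor(2, shape=(), dtype=int32)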

train_dataset = tf.data.TextLineDataset(train_dataset_fp)   # read the CSV as a line dataset
train_dataset = train_dataset.skip(1)                       # skip the header row
train_dataset = train_dataset.map(parse_csv)                # parse every row
train_dataset = train_dataset.shuffle(buffer_size=1000)     # shuffle the examples
train_dataset = train_dataset.batch(32)                     # group into batches of 32

# Print one training example
features, label = next(iter(train_dataset))
print('example features:', features[0])
print('example label:', label[0])

example features: tf.Tensor([6.8 3. 5.5 2.1], shape=(4,), dtype=float32)
example label: tf.Tensor(2, shape=(), dtype=int32)

# Build the neural network model: two hidden layers with 10 units each
model=tf.keras.Sequential([
    tf.keras.layers.Dense(10,activation='relu',input_shape=(4,)),
    tf.keras.layers.Dense(10,activation='relu'),
    tf.keras.layers.Dense(3)
])
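
Because input_shape=(4,) is given to the first layer, the model is built immediately and its size can be inspected; the three Dense layers hold (4+1)*10 + (10+1)*10 + (10+1)*3 = 193 trainable parameters.

model.summary()   # prints each layer's output shape and parameter count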

# Define the loss: sparse softmax cross entropy between the labels and the model's logits
def loss(model, x, y):
    y_ = model(x)
    return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
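
A quick sanity check of the loss on hand-picked (hypothetical) values: sparse softmax cross entropy is -log(softmax(logits)[label]), averaged over the batch.

logits = tf.constant([[2.0, 1.0, 0.1]])
labels = tf.constant([0])
print(tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits))
# about 0.417, i.e. -log(exp(2.0) / (exp(2.0) + exp(1.0) + exp(0.1)))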

# Compute the gradients of the loss with respect to the model's variables
def grad(model, inputs, targets):
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets)
    return tape.gradient(loss_value, model.variables)   # list of gradients, one per variable

optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)

train_loss_results=[]
train_accuracy_results=[]
num_epochs=201

# Run the optimization loop for num_epochs (201) epochs
for epoch in range(num_epochs):
    epoch_loss_avg = tfe.metrics.Mean()        # running mean of the cross-entropy loss
    epoch_accuracy = tfe.metrics.Accuracy()    # running classification accuracy

    for x,y in train_dataset:
        grads=grad(model,x,y)
        optimizer.apply_gradients(zip(grads, model.variables),   # pair each gradient with its variable
                                  global_step=tf.train.get_or_create_global_step())
        epoch_loss_avg(loss(model,x,y))
        epoch_accuracy(tf.argmax(model(x),axis=1,output_type=tf.int32),y)
    train_loss_results.append(epoch_loss_avg.result())
    train_accuracy_results.append(epoch_accuracy.result())
    if epoch % 50 == 0:
        print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,epoch_loss_avg.result(),epoch_accuracy.result()

Epoch 000: Loss: 1.217, Accuracy: 30.833%
Epoch 050: Loss: 0.524, Accuracy: 93.333%
Epoch 100: Loss: 0.261, Accuracy: 96.667%
Epoch 150: Loss: 0.169, Accuracy: 97.500%
Epoch 200: Loss: 0.133, Accuracy: 97.500%

# Visualize the loss and accuracy over the course of training
fig,axes=plt.subplots(2,sharex=True,figsize=(12,8))
fig.suptitle('Training Metrics')
axes[0].set_ylabel("Loss", fontsize=14)
axes[0].plot(train_loss_results)

axes[1].set_ylabel("Accuracy", fontsize=14)
axes[1].set_xlabel("Epoch", fontsize=14)
axes[1].plot(train_accuracy_results)

plt.show()

[Figure 1: Training Metrics, loss (top) and accuracy (bottom) plotted against epoch]

# Evaluate the model on the test set
test_url = "http://download.tensorflow.org/data/iris_test.csv"

test_fp = tf.keras.utils.get_file(fname=os.path.basename(test_url),
                                  origin=test_url)

test_dataset = tf.data.TextLineDataset(test_fp)
test_dataset = test_dataset.skip(1)
test_dataset = test_dataset.map(parse_csv)
test_dataset = test_dataset.shuffle(1000)
test_dataset = test_dataset.batch(32)

test_accuracy = tfe.metrics.Accuracy()

for (x, y) in test_dataset:
  prediction = tf.argmax(model(x), axis=1, output_type=tf.int32)
  test_accuracy(prediction, y)

print("Test set accuracy: {:.3%}".format(test_accuracy.result()))

Test set accuracy: 100.000%
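
Accuracy alone hides per-class behaviour. As an optional sketch, tf.confusion_matrix tallies true versus predicted classes; iterating the dataset again simply gives a fresh pass over the file.

true_labels, predicted_labels = [], []
for (x, y) in test_dataset:
    true_labels.append(y)
    predicted_labels.append(tf.argmax(model(x), axis=1, output_type=tf.int32))
confusion = tf.confusion_matrix(tf.concat(true_labels, axis=0),
                                tf.concat(predicted_labels, axis=0),
                                num_classes=3)
print(confusion)   # rows are true classes, columns are predicted classes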

# Use the trained model to predict new, unlabeled examples
class_ids = ["Iris setosa", "Iris versicolor", "Iris virginica"]
predict_dataset = tf.convert_to_tensor([
    [5.1, 3.3, 1.7, 0.5,],
    [5.9, 3.0, 4.2, 1.5,],
    [6.9, 3.1, 5.4, 2.1]
])

predictions = model(predict_dataset)

for i, logits in enumerate(predictions):
  class_idx = tf.argmax(logits).numpy()
  name = class_ids[class_idx]
  print("Example {} prediction: {}".format(i, name))

Example 0 prediction: Iris setosa
Example 1 prediction: Iris versicolor
Example 2 prediction: Iris virginica
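
The final Dense layer produces raw logits, so the argmax above only reports the most likely class. A small follow-up sketch applies tf.nn.softmax to turn each row of logits into class probabilities, giving a rough confidence for every prediction:

for i, logits in enumerate(predictions):
    probs = tf.nn.softmax(logits)
    print("Example {} class probabilities: {}".format(i, probs.numpy()))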
