This article walks through an example of loading CSV-formatted data into a tf.data.Dataset. The data used is the Titanic passenger dataset. The model predicts a passenger's likelihood of survival from features such as age, gender, ticket class, and whether the person was traveling alone.
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import pandas as pd
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
# Make numpy output easier to read.
# precision=3 prints floats with 3 decimal places.
# suppress=True prints small floats in fixed-point rather than scientific notation.
# (Other useful options: threshold=np.inf prints every value with no ellipsis;
#  a small value such as threshold=5 summarizes larger arrays with "...".)
np.set_printoptions(precision=3, suppress=True)
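For example, with these options a float array prints in fixed-point with three decimals (an illustrative check, not part of the pipeline):
x = np.array([0.000123456, 1234.56789])
print(x)  # roughly: [   0.    1234.568] -- no scientific notation, 3 decimals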
1. First, print the first few rows of the CSV file
df = pd.read_csv(train_file_path)
# Show all columns (None means no limit; a specific number also works).
pd.set_option('display.max_columns', None)
# Show all rows.
pd.set_option('display.max_rows', None)
# Maximum display width per column (the default is 50).
pd.set_option('display.max_colwidth', 200)
# Do not wrap wide frames across multiple lines (False disables wrapping).
pd.set_option('expand_frame_repr', False)
print(df.head())
   survived     sex   age  n_siblings_spouses  parch     fare  class     deck  embark_town alone
0         0    male  22.0                   1      0   7.2500  Third  unknown  Southampton     n
1         1  female  38.0                   1      0  71.2833  First        C    Cherbourg     n
2         1  female  26.0                   0      0   7.9250  Third  unknown  Southampton     y
3         1  female  35.0                   1      0  53.1000  First        C  Southampton     n
4         0    male  28.0                   0      0   8.4583  Third  unknown   Queenstown     y
2. Use tf.data.experimental.make_csv_dataset() to load the CSV file and convert it into a dataset object
As the output shows, each column of the CSV file has a column name, and the dataset constructor picks these names up automatically. If the first row of the file does not contain column names, pass them as a list of strings via the column_names argument of make_csv_dataset.
CSV_COLUMN_NAMES = ["survived", "sex", "age", "n_siblings_spouses", "parch", "fare", "class", "deck", "embark_town", "alone"]
# Load the CSV file as a dataset.
dataset = tf.data.experimental.make_csv_dataset(..., column_names=CSV_COLUMN_NAMES, ...)
To load only selected columns of the CSV file as training features, specify them with the select_columns argument of make_csv_dataset.
dataset = tf.data.experimental.make_csv_dataset(
...,
select_columns = columns_to_use,
...)
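For example, a minimal runnable sketch (the column subset below is purely illustrative; note that when label_name is also used, the label column must be included in select_columns):
columns_to_use = ['survived', 'sex', 'age', 'fare', 'class']  # illustrative subset
dataset = tf.data.experimental.make_csv_dataset(
    train_file_path,
    batch_size=5,
    select_columns=columns_to_use,
    label_name='survived',
    num_epochs=1)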
If the CSV file contains the class column we want to predict, specify it with the label_name argument of make_csv_dataset.
3. Use tf.data.experimental.make_csv_dataset() to load the Titanic passenger data
LABEL_COLUMN = "survived"
LABELS = [0,1]
def get_dataset(file_path, **kwargs):
    dataset = tf.data.experimental.make_csv_dataset(file_path,
                                                    batch_size=5,
                                                    label_name=LABEL_COLUMN,
                                                    na_value="?",
                                                    num_epochs=1,
                                                    ignore_errors=True,
                                                    **kwargs)
    return dataset
train_dataset = get_dataset(train_file_path)
test_dataset = get_dataset(test_file_path)
Display the data in a single batch:
def show_batch(dataset):
    for batch, label in dataset.take(1):
        for key, value in batch.items():
            print("{:20s}: {}".format(key, value.numpy()))
Next, we handle the categorical and numeric data. Some columns in the CSV data are categorical; that is, they can only take values from a limited set.
1. Categorical feature columns
CATEGORIES = {
'sex': ['male', 'female'],
'class' : ['First', 'Second', 'Third'],
'deck' : ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
    'embark_town' : ['Cherbourg', 'Southampton', 'Queenstown'],
'alone' : ['y', 'n']
}
Use the tf.feature_column API to create a collection of tf.feature_column.indicator_column objects, one tf.feature_column.indicator_column per categorical column.
categorical_columns = []
for feature, vocab in CATEGORIES.items():
cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=feature, vocabulary_list=vocab)
categorical_columns.append(tf.feature_column.indicator_column(cat_col))
print(categorical_columns)
[IndicatorColumn(categorical_column=VocabularyListCategoricalColumn(key='class', vocabulary_list=('First', 'Second', 'Third'), dtype=tf.string, default_value=-1, num_oov_buckets=0)),
 IndicatorColumn(categorical_column=VocabularyListCategoricalColumn(key='sex', vocabulary_list=('male', 'female'), dtype=tf.string, default_value=-1, num_oov_buckets=0)),
 IndicatorColumn(categorical_column=VocabularyListCategoricalColumn(key='alone', vocabulary_list=('y', 'n'), dtype=tf.string, default_value=-1, num_oov_buckets=0)),
 IndicatorColumn(categorical_column=VocabularyListCategoricalColumn(key='deck', vocabulary_list=('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'), dtype=tf.string, default_value=-1, num_oov_buckets=0)),
 IndicatorColumn(categorical_column=VocabularyListCategoricalColumn(key='embark_town', vocabulary_list=('Cherbourg', 'Southampton', 'Queenstown'), dtype=tf.string, default_value=-1, num_oov_buckets=0))]
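As a quick illustrative check (not part of the pipeline itself), these columns can be applied to a single batch through a DenseFeatures layer; each categorical column becomes a block of one-hot indicators:
example_batch, example_labels = next(iter(train_dataset))  # one batch of (features, labels)
demo_layer = tf.keras.layers.DenseFeatures(categorical_columns)
print(demo_layer(example_batch).numpy())  # each row: concatenated one-hot vectors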
2. Numeric feature columns
Continuous data needs to be normalized.
def process_continuous_data(mean, data):
    # Normalize the data so that values near the mean land around 0.5.
    data = tf.cast(data, tf.float32) * 1/(2*mean)
    return tf.reshape(data, [-1, 1])
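A quick sanity check of this scaling (the input values below are illustrative): a value equal to the column mean maps to 0.5, a value equal to twice the mean maps to 1.0, and the reshape gives one column per example:
print(process_continuous_data(29.631308, tf.constant([29.631308, 59.262616])))
# roughly: [[0.5], [1.0]] as a float32 tensor of shape (2, 1)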
Now create a collection of numeric columns. The tf.feature_column.numeric_column API accepts a normalizer_fn argument. Pass it a functools.partial; each partial binds the normalization function to the corresponding column's mean.
MEANS = {
'age' : 29.631308,
'n_siblings_spouses' : 0.545455,
'parch' : 0.379585,
'fare' : 34.385399
}
numerical_columns = []
for feature in MEANS.keys():
    num_col = tf.feature_column.numeric_column(
        feature, normalizer_fn=functools.partial(process_continuous_data, MEANS[feature]))
    numerical_columns.append(num_col)
print(numerical_columns)
[NumericColumn(key='age', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=functools.partial(<function process_continuous_data at 0x...>, 29.631308)),
 NumericColumn(key='fare', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=functools.partial(<function process_continuous_data at 0x...>, 34.385399)),
 NumericColumn(key='n_siblings_spouses', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=functools.partial(<function process_continuous_data at 0x...>, 0.545455)),
 NumericColumn(key='parch', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=functools.partial(<function process_continuous_data at 0x...>, 0.379585))]
The normalization used here requires knowing each column's mean in advance. To compute normalization statistics over a continuous stream of data, use TensorFlow Transform.
Concatenate the two feature-column collections and pass them to tf.keras.layers.DenseFeatures to create an input layer that performs the preprocessing.
preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numerical_columns)
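As an illustrative check, applying this layer to the example batch from earlier yields one dense feature vector per passenger, concatenating the one-hot categorical blocks with the normalized numeric values:
print(preprocessing_layer(example_batch).numpy()[0])  # first passenger's preprocessed features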
model = tf.keras.Sequential([
preprocessing_layer,
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
train_data = train_dataset.shuffle(500)
test_data = test_dataset
model.fit(train_data, epochs=20)
Once the model has finished training, you can check its accuracy on the test set test_data.
test_loss, test_accuracy = model.evaluate(test_data)
print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
Use tf.keras.Model.predict to infer labels for a single batch or multiple batches.
predictions = model.predict(test_data)
# Show some of the results
for prediction, survived in zip(predictions[:10], list(test_data)[0][1][:10]):
    print("Predicted survival: {:.2%}".format(prediction[0]),
          " | Actual outcome: ",
          ("SURVIVED" if bool(survived) else "DIED"))
Predicted survival: 55.93% | Actual outcome: SURVIVED
Predicted survival: 52.15% | Actual outcome: DIED
Predicted survival: 42.62% | Actual outcome: DIED
Predicted survival: 8.34% | Actual outcome: DIED
Predicted survival: 69.12% | Actual outcome: DIED
Predicted survival: 10.32% | Actual outcome: DIED
Predicted survival: 94.82% | Actual outcome: SURVIVED
Predicted survival: 2.17% | Actual outcome: DIED
Predicted survival: 17.52% | Actual outcome: DIED
Predicted survival: 45.75% | Actual outcome: SURVIVED