When a model has multiple inputs and multiple outputs, make sure the number and shapes of the arrays you pass in match the model's declared inputs and outputs.
Below is an example with multiple inputs and multiple outputs:
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
#so that figures are displayed inline in the notebook
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
from sklearn.datasets import fetch_california_housing #load the California housing data from sklearn
housing = fetch_california_housing()
print(housing.DESCR)
print(housing.data.shape)
print(housing.target.shape)
#use train_test_split to split the dataset
# test_size controls the split ratio; the default split is 3:1 (75% / 25%)
from sklearn.model_selection import train_test_split
#split the full dataset into a training set and a test set
x_train_all, x_test, y_train_all, y_test = train_test_split(housing.data, housing.target, random_state = 1)
#split the training set again into a training set and a validation set
x_train, x_valid, y_train, y_valid = train_test_split(x_train_all, y_train_all, random_state=2)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)
(11610, 8) (11610,)
(3870, 8) (3870,)
(5160, 8) (5160,)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
#standardize the data (zero mean, unit variance)
#transform expects a 2-D array, so reshape the data first if necessary
#e.g. for image data: x_train: [None, 28, 28] -> [None, 784]; the housing data here is already 2-D
#for the difference between fit_transform and transform, see my TensorFlow blog post
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
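As a quick illustration of the fit_transform/transform difference mentioned above (a minimal sketch reusing the scaler fitted on x_train; not part of the original code): fit_transform learns the per-feature mean and scale from the training data, and transform simply reapplies those statistics.
#mean_ and scale_ were learned from x_train by fit_transform
print(scaler.mean_.shape, scaler.scale_.shape)   # (8,) (8,)
#transform applies (x - mean_) / scale_ using those training statistics
print(np.allclose(x_valid_scaled, (x_valid - scaler.mean_) / scaler.scale_))  # True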
#note: after standardization, use the scaled arrays everywhere below
#since the model has two inputs, split the scaled features into a wide part and a deep part
x_train_scaled_wide = x_train_scaled[:, :5]
x_train_scaled_deep = x_train_scaled[:, 2:]
x_valid_scaled_wide = x_valid_scaled[:, :5]
x_valid_scaled_deep = x_valid_scaled[:, 2:]
x_test_scaled_wide = x_test_scaled[:, :5]
x_test_scaled_deep = x_test_scaled[:, 2:]
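A quick sanity check (not in the original) that the wide part has 5 features and the deep part has 6, matching the Input shapes declared below:
print(x_train_scaled_wide.shape, x_train_scaled_deep.shape)   # (11610, 5) (11610, 6)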
#wide & deep model built with the functional API
#inputs
input_wide = keras.layers.Input(shape = [5])
input_deep = keras.layers.Input(shape = [6])
#deep branch
hidden1 = keras.layers.Dense(30, activation='relu')(input_deep)
hidden2 = keras.layers.Dense(30, activation= 'relu')(hidden1)
#concatenate the wide and deep branches
concat = keras.layers.concatenate([input_wide, hidden2])
#outputs: the main head from the concatenated features and an auxiliary head from the deep branch
output = keras.layers.Dense(1)(concat)
output2 = keras.layers.Dense(1)(hidden2)
#assemble the Model from its inputs and outputs
model = keras.models.Model(inputs = [input_wide, input_deep],
outputs = [output, output2])
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) [(None, 6)] 0
__________________________________________________________________________________________________
dense (Dense) (None, 30) 210 input_2[0][0]
__________________________________________________________________________________________________
input_1 (InputLayer) [(None, 5)] 0
__________________________________________________________________________________________________
dense_1 (Dense) (None, 30) 930 dense[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 35) 0 input_1[0][0]
dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 36 concatenate[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 31 dense_1[0][0]
==================================================================================================
Total params: 1,207
Trainable params: 1,207
Non-trainable params: 0
__________________________________________________________________________________________________
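Optionally, the two-input/two-output topology can also be drawn with keras.utils.plot_model (a sketch; it assumes the pydot and graphviz packages are installed):
keras.utils.plot_model(model, show_shapes=True, to_file="wide_deep.png")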
#compile the model
model.compile(loss = "mean_squared_error", #loss function: mean squared error
              optimizer = "adam", #optimizer
              )
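Because the model has two outputs, compile can also take one loss per output plus a loss_weights list to balance them (an alternative sketch, not from the original; the 0.9/0.1 weights are arbitrary):
#one loss per output, weighting the main (concatenated) head higher than the auxiliary head
model.compile(loss = ["mean_squared_error", "mean_squared_error"],
              loss_weights = [0.9, 0.1],
              optimizer = "adam")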
#callbacks: early stopping
callbacks = [
keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
]
#train the model; the training history is returned and stored in history
#note: the targets must match the model outputs in number and shape
history = model.fit([x_train_scaled_wide, x_train_scaled_deep],
                    [y_train, y_train],
                    epochs = 10,
                    validation_data = ([x_valid_scaled_wide, x_valid_scaled_deep], [y_valid, y_valid]),
                    callbacks = callbacks) #pass the callbacks
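If the Input layers had been created with name= arguments, fit could also take dicts keyed by those names, which makes the mapping from arrays to inputs explicit (a non-executed sketch with hypothetical names wide_input and deep_input; the inputs above are unnamed):
#hypothetical: input_wide = keras.layers.Input(shape=[5], name="wide_input"), and likewise for "deep_input"
#history = model.fit({"wide_input": x_train_scaled_wide, "deep_input": x_train_scaled_deep},
#                    [y_train, y_train], epochs=10,
#                    validation_data=({"wide_input": x_valid_scaled_wide, "deep_input": x_valid_scaled_deep},
#                                     [y_valid, y_valid]),
#                    callbacks=callbacks)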
#plot the learning curves recorded in history
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8,5))
    plt.grid(True)
    plt.gca().set_ylim(0.2, 2)
    plt.show()
plot_learning_curves(history)
model.evaluate([x_test_scaled_wide, x_test_scaled_deep] ,[y_test, y_test])
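On a multi-output model, predict returns one array per output head; a quick sketch (not in the original):
#returns two arrays, one per output head, each of shape (5160, 1)
y_pred_main, y_pred_aux = model.predict([x_test_scaled_wide, x_test_scaled_deep])
print(y_pred_main.shape, y_pred_aux.shape)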