Keras version: keras-1.0.7

```python
from keras.optimizers import SGD
import numpy as np
from keras.layers import Input, merge
from keras.models import Model
from keras.layers.core import Dense


def get_top_model():
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    # two scalar inputs, one per XOR operand
    input_lhs = Input(shape=(1,))
    input_rhs = Input(shape=(1,))
    # concatenate the two input tensors into a single 2-dimensional vector
    merged_vector = merge([input_lhs, input_rhs], mode='concat', concat_axis=-1)
    # a layer instance is callable on a tensor, and returns a tensor
    x = Dense(64, activation='sigmoid')(merged_vector)
    x = Dense(64, activation='sigmoid')(x)
    predictions = Dense(1, activation='sigmoid')(x)
    # this creates a model that maps the two inputs to the prediction
    topModel = Model(input=[input_lhs, input_rhs], output=predictions)
    topModel.compile(optimizer=sgd,
                     loss='mean_squared_error',
                     metrics=['accuracy'])
    return topModel


if __name__ == "__main__":
    # XOR truth table for the input pairs: (1,1)->0, (0,0)->0, (1,0)->1, (0,1)->1
    label_data = np.array([0, 0, 1, 1])
    topModel = get_top_model()
    topModel.fit([np.array([1, 0, 1, 0]), np.array([1, 0, 0, 1])],
                 label_data, nb_epoch=10000, batch_size=4)
    print(topModel.predict([np.array([1, 0, 1, 0]), np.array([1, 0, 0, 1])]))
```

Result:

```
[[ 0.01064401]
[ 0.00852218]
[ 0.99042308]
[ 0.99032754]]
```
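The pairs fed to `fit` above are (1,1), (0,0), (1,0), (0,1), so the targets [0, 0, 1, 1] are simply the XOR of each pair, and the trained outputs sit close to them. A minimal follow-up check, assuming the `topModel` from the `__main__` block is still in scope, thresholds the sigmoid outputs at 0.5 to recover hard labels:

```python
# Sketch: reuse the trained topModel and round its sigmoid outputs to hard 0/1 labels.
lhs = np.array([1, 0, 1, 0])
rhs = np.array([1, 0, 0, 1])
hard_labels = (topModel.predict([lhs, rhs]) > 0.5).astype('uint8').flatten()
print(hard_labels)  # expected: [0 0 1 1], i.e. lhs XOR rhs element-wise
```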
Implementation 2
```python
import numpy as np
from keras.models import Sequential
from keras.layers.core import Activation, Dense
from keras.optimizers import SGD
# build the XOR truth table: 4 input pairs and their targets
X = np.zeros((4, 2), dtype='uint8')
y = np.zeros(4, dtype='uint8')
X[0] = [0, 0]
y[0] = 0
X[1] = [0, 1]
y[1] = 1
X[2] = [1, 0]
y[2] = 1
X[3] = [1, 1]
y[3] = 0
# two-layer perceptron: 2 hidden sigmoid units feeding 1 sigmoid output
model = Sequential()
model.add(Dense(2, input_dim=2))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
# accuracy is requested via compile metrics; the Keras 0.x show_accuracy
# argument to fit is replaced by this in Keras 1.x
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
history = model.fit(X, y, nb_epoch=10000, batch_size=4, verbose=2)
print(model.predict(X))
```

Result:

```
[[ 0.01088379]
[ 0.0088246 ]
[ 0.99008536]
 [ 0.99009061]]
```
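Both listings rely on the Keras 1.x API pinned by the version note at the top (keras-1.0.7): `merge`, the `input=`/`output=` arguments of `Model`, and `nb_epoch` in `fit`. Purely as a sketch, and assuming a Keras 2.x installation rather than the version used in this post, the first implementation's two-input model would be written with `concatenate` and the renamed arguments:

```python
# Sketch only: the two-input XOR model under the Keras 2.x API.
# Assumption: keras>=2.0 is installed; this is NOT the keras-1.0.7 code above.
import numpy as np
from keras.layers import Input, Dense, concatenate
from keras.models import Model
from keras.optimizers import SGD

input_lhs = Input(shape=(1,))
input_rhs = Input(shape=(1,))
merged = concatenate([input_lhs, input_rhs])        # replaces merge(..., mode='concat')
x = Dense(64, activation='sigmoid')(merged)
x = Dense(64, activation='sigmoid')(x)
predictions = Dense(1, activation='sigmoid')(x)

model = Model(inputs=[input_lhs, input_rhs], outputs=predictions)  # input/output renamed
model.compile(optimizer=SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True),
              loss='mean_squared_error',
              metrics=['accuracy'])
model.fit([np.array([1, 0, 1, 0]), np.array([1, 0, 0, 1])],
          np.array([0, 0, 1, 1]),
          epochs=10000, batch_size=4, verbose=0)    # nb_epoch renamed to epochs
```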