input_shape=(128, 40)  # the first dimension is the number of timesteps, the second (40) is the feature count. I had them reversed at first, which made training slow; after swapping them back, accuracy went up.
If your axes are reversed, fix them with np.transpose() rather than reshape(): transpose swaps the axes so each timestep's feature vector stays together, whereas reshape just re-flows the flat buffer in order and will scramble the time/feature alignment.
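A minimal sketch of the difference (the tiny array is just for illustration):

import numpy as np

x = np.arange(6).reshape(2, 3)  # pretend (timesteps=2, features=3)
print(x)
# [[0 1 2]
#  [3 4 5]]
print(np.transpose(x))  # (3, 2): axes swapped, element (t, f) moves to (f, t)
# [[0 3]
#  [1 4]
#  [2 5]]
print(x.reshape(3, 2))  # (3, 2) too, but elements just re-flowed in row order
# [[0 1]
#  [2 3]
#  [4 5]]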
Here is LSTM_model:
import keras
from keras.layers import Input, LSTM, Dense, Dropout
from keras.models import Model

def LSTM_model():
    n_classes = 6
    inp = Input(shape=(128, 40))  # (timesteps, MFCC features)
    lstm1 = LSTM(
        256,  # units; the old output_dim= argument is Keras 1 API
        activation='tanh',
        return_sequences=False)(inp)  # keep only the last timestep's output
    dl1 = Dropout(0.3)(lstm1)
    d11 = Dense(100, activation='relu')(dl1)
    l11 = Dropout(0.3)(d11)
    l12 = Dense(n_classes, activation='softmax')(l11)
    model = Model(inputs=inp, outputs=l12)
    model.summary()
    # compile the model (on newer Keras, pass learning_rate= instead of lr=)
    adam = keras.optimizers.Adam(lr=0.0005, beta_1=0.95, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
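For a quick smoke test, random data shaped like the MFCC input works; the sample count, batch size, and epoch count below are arbitrary placeholders, not the settings used for the real experiment:

import numpy as np
from keras.utils import to_categorical

model = LSTM_model()
X = np.random.randn(32, 128, 40).astype('float32')  # 32 fake clips, 128 timesteps x 40 MFCCs
y = to_categorical(np.random.randint(0, 6, size=32), num_classes=6)  # one-hot labels, 6 classes
model.fit(X, y, batch_size=8, epochs=1)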