This post uses the Ant Colony Optimization (ACO) algorithm to tune LSTM hyperparameters.
The theory behind ACO is not covered here; the focus is on a hands-on code implementation.
Code walkthrough
import numpy as np

class ACO:
    def __init__(self, parameters):
        """
        Ant Colony Optimization
        parameters: a list like [NGEN, pop_size, var_min_list, var_max_list]
        """
        # Initialization
        self.NGEN = parameters[0]          # number of generations
        self.pop_size = parameters[1]      # colony (population) size
        self.var_num = len(parameters[2])  # number of decision variables
        self.bound = []                    # lower/upper bounds of the variables
        self.bound.append(parameters[2])
        self.bound.append(parameters[3])
        self.pop_x = np.zeros((self.pop_size, self.var_num))  # positions of all ants
        self.g_best = np.zeros((1, self.var_num))             # global best position
        # Initialize generation 0 and its global best solution
        temp = -1
        for i in range(self.pop_size):
            for j in range(self.var_num):
                self.pop_x[i][j] = np.random.uniform(self.bound[0][j], self.bound[1][j])
            fit = self.fitness(self.pop_x[i])
            if fit > temp:
                self.g_best = self.pop_x[i].copy()  # copy so g_best does not alias the ant's position
                temp = fit
    def main(self):
        popobj = []
        best = np.zeros(self.var_num)  # best position found so far
        for gen in range(1, self.NGEN + 1):
            if gen == 1:
                # initial pheromone: the fitness of every ant in generation 0
                t = np.array([self.fitness(ant) for ant in self.pop_x])
                tmax, t = self.update_operator(gen, t, np.max(t))
            else:
                tmax, t = self.update_operator(gen, t, tmax)
            popobj.append(self.fitness(self.g_best))
            print('############ Generation {} ############'.format(gen))
            print(self.g_best)
            print(self.fitness(self.g_best))
            if self.fitness(self.g_best) > self.fitness(best):
                best = self.g_best.copy()
        print('Best position: {}'.format(best))
        print('Best fitness value: {}'.format(self.fitness(best)))
        print("---- End of (successful) Searching ----")
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense, Activation

def build_model(neurons1, neurons2, dropout):
    # process_data() is assumed to be defined elsewhere (data loading and normalization)
    X_train, y_train, X_test, y_test = process_data()
    # X_train, y_train = create_dataset(X_train, y_train, steps)
    # X_test, y_test = create_dataset(X_test, y_test, steps)
    nb_features = X_train.shape[2]  # number of input features per time step
    input1 = X_train.shape[1]       # number of time steps
    model1 = Sequential()
    model1.add(LSTM(
        input_shape=(input1, nb_features),
        units=neurons1,
        return_sequences=True))
    model1.add(Dropout(dropout))
    model1.add(LSTM(
        units=neurons2,
        return_sequences=False))
    model1.add(Dropout(dropout))
    model1.add(Dense(units=1))
    model1.add(Activation("linear"))
    model1.compile(loss='mse', optimizer='Adam', metrics=['mae'])
    return model1, X_train, y_train, X_test, y_test
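The ACO class also calls a fitness method that the excerpt omits. A plausible sketch, assuming fitness is a method of ACO that simply wraps build_model, trains briefly on the candidate hyperparameters, and returns the negative validation loss so that larger is better (the epoch count and scoring choice are assumptions, not from the original post):

    def fitness(self, ind_var):
        """Sketch (assumed): train briefly with candidate hyperparameters, score on validation loss."""
        neurons1 = int(ind_var[0])
        neurons2 = int(ind_var[1])
        dropout = float(ind_var[2])
        batch_size = int(ind_var[3])
        model, X_train, y_train, _, _ = build_model(neurons1, neurons2, dropout)
        history = model.fit(X_train, y_train, epochs=10, batch_size=batch_size,
                            validation_split=0.2, verbose=0)
        # ACO maximizes fitness, so return the negative of the best validation loss
        return -min(history.history['val_loss'])

Keeping the inner training short (a handful of epochs) matters here, because fitness is evaluated many times per generation.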
from keras.callbacks import EarlyStopping

if __name__ == '__main__':
    '''
    Decision variables being optimized:
    number of neurons in the first LSTM layer
    number of neurons in the second LSTM layer
    dropout rate
    batch_size
    '''
    UP = [51, 6, 0.055, 9]   # upper bounds of the four hyperparameters
    DOWN = [50, 5, 0.05, 8]  # lower bounds of the four hyperparameters
    NGEN = 100
    popsize = 100
    parameters = [NGEN, popsize, DOWN, UP]
    # Start the optimization
    aco = ACO(parameters)
    aco.main()
    # Train the final model with the best hyperparameters found by ACO
    neurons1 = int(aco.g_best[0])
    neurons2 = int(aco.g_best[1])
    dropout = aco.g_best[2]
    batch_size = int(aco.g_best[3])
    # neurons1 = 64
    # neurons2 = 64
    # dropout = 0.01
    # batch_size = 32
    model, X_train, y_train, X_test, y_test = build_model(neurons1, neurons2, dropout)
    history1 = model.fit(X_train, y_train, epochs=150, batch_size=batch_size, validation_split=0.2, verbose=1,
                         callbacks=[EarlyStopping(monitor='val_loss', patience=9, restore_best_weights=True)])
    # Predict on the test set
    y_score = model.predict(X_test)
    # Invert the normalization (scaler is assumed to be the fitted scaler from process_data)
    y_score = scaler.inverse_transform(y_score.reshape(-1, 1))
    y_test = scaler.inverse_transform(y_test.reshape(-1, 1))
    print("==========evaluation==============\n")
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import mean_absolute_error  # mean absolute error
    import math
    MAE = mean_absolute_error(y_test, y_score)
    print('MAE: %.4f ' % MAE)
    RMSE = math.sqrt(mean_squared_error(y_test, y_score))
    print('RMSE: %.4f ' % RMSE)
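process_data() and the scaler object are also never defined in this excerpt; they presumably load and normalize the dataset. A minimal sketch assuming a univariate time series stored in a CSV file (the file name, column choice, window length, and split ratio below are all placeholders, not from the original post):

from sklearn.preprocessing import MinMaxScaler
import pandas as pd

scaler = MinMaxScaler()

def process_data(path='data.csv', steps=10, train_ratio=0.8):
    """Sketch (assumed): load a univariate series, scale it, and window it for the LSTM."""
    values = pd.read_csv(path).iloc[:, -1].values.reshape(-1, 1)
    values = scaler.fit_transform(values)
    X, y = [], []
    for i in range(len(values) - steps):
        X.append(values[i:i + steps])
        y.append(values[i + steps])
    X, y = np.array(X), np.array(y)  # X shape: (samples, steps, 1)
    split = int(len(X) * train_ratio)
    return X[:split], y[:split], X[split:], y[split:]

In a real script this would sit above build_model, since both build_model and the evaluation code depend on it and on the fitted scaler.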
ACO is a fairly old optimization algorithm, but it still has practical value.
Note:
If you need the source code and dataset, or would like to discuss, please message me privately. Thanks.