Traceback (most recent call last):
File "E:/python/pycharm/course2_week2/main.py", line 278, in <module>
parameters = model(train_X, train_Y, layers_dims, beta=0.9,optimizer="momentum",is_plot=True)
File "E:/python/pycharm/course2_week2/main.py", line 240, in model
A3,cache = opt_utils.forward_propagation(minibatch_X,parameters)
File "E:\python\pycharm\course2_week2\opt_utils.py", line 100, in forward_propagation
W1 = parameters["W1"]
TypeError: tuple indices must be integers or slices, not str
The traceback above came up in code I typed myself while working through Andrew Ng's Deep Learning course (Course 2, Week 2, optimization methods). The root cause is that the momentum/Adam update functions return more than one value; because I captured only one of them, parameters was bound to the whole returned tuple instead of the parameter dictionary, and the next forward_propagation call then failed on parameters["W1"]. The fix is in the model function below.
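Before the full function, here is a minimal self-contained sketch of the mistake (fake_update and its dictionaries are made-up names for illustration, not from the assignment): a function that returns two values is assigned to a single name, that name becomes a tuple, and indexing it with a string raises exactly the TypeError from the traceback.

import numpy as np

def fake_update(parameters, grads):
    # stand-in for an update function that returns (parameters, v)
    v = {"dW1": np.zeros((2, 2))}
    return parameters, v

parameters = {"W1": np.random.randn(2, 2)}
grads = {"dW1": np.zeros((2, 2))}

parameters = fake_update(parameters, grads)   # bug: parameters is now a tuple, not a dict
try:
    W1 = parameters["W1"]
except TypeError as e:
    print(e)   # tuple indices must be integers or slices, not str

# the fix: unpack both returned values
# parameters, v = fake_update(parameters, grads)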
import matplotlib.pyplot as plt
import opt_utils
# random_mini_batches, initialize_velocity, initialize_adam and the three
# update_parameters_with_* functions are defined earlier in main.py

def model(X, Y, layers_dims, optimizer, learning_rate=0.0007, mini_batch_size=64, beta=0.9, beta1=0.9, beta2=0.999, epsilon=1e-8, num_epoches=10000, print_cost=True, is_plot=True):
    L = len(layers_dims)
    costs = []
    t = 0        # Adam step counter, incremented after each mini-batch
    seed = 10    # random seed

    parameters = opt_utils.initialize_parameters(layers_dims)

    if optimizer == 'gd':
        pass  # plain gradient descent, no extra optimizer state
    elif optimizer == 'momentum':
        v = initialize_velocity(parameters)      # momentum
    elif optimizer == 'adam':
        v, s = initialize_adam(parameters)       # Adam
    else:
        print("Invalid value for optimizer, exiting")
        exit(1)

    # training loop
    for i in range(num_epoches):
        # increase the seed after every pass over the data set so the
        # mini-batches are reshuffled in a different order each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)

        for minibatch in minibatches:
            (minibatch_X, minibatch_Y) = minibatch
            A3, cache = opt_utils.forward_propagation(minibatch_X, parameters)
            cost = opt_utils.compute_cost(A3, minibatch_Y)
            grads = opt_utils.backward_propagation(minibatch_X, minibatch_Y, cache)

            if optimizer == 'gd':
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == 'momentum':
                parameters, v = update_parameters_with_momentun(parameters, grads, v, beta, learning_rate)
            elif optimizer == 'adam':
                t += 1
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t, learning_rate, beta1, beta2, epsilon)

        if i % 100 == 0:
            costs.append(cost)
        if print_cost and i % 1000 == 0:
            print("Cost after epoch " + str(i) + ": " + str(cost))

    if is_plot:
        plt.plot(costs)
        plt.ylabel("cost")
        plt.xlabel("epochs (per 100)")
        plt.title("learning rate = " + str(learning_rate))
        plt.show()

    return parameters
In the function above, in the parameter-update step for momentum and Adam, my original calls were:
parameters = update_parameters_with_momentun(parameters,grads,v,beta,learning_rate)
parameters = update_parameters_with_adam(parameters,grads,v,s,t,learning_rate,beta1,beta2,epsilon)
and they have to be changed to the following, because I had captured too few of the returned values when calling the functions:
parameters,v = update_parameters_with_momentun(parameters,grads,v,beta,learning_rate)
parameters,v,s = update_parameters_with_adam(parameters,grads,v,s,t,learning_rate,beta1,beta2,epsilon)
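For reference, this is roughly what the momentum update looks like in this week's assignment; my own update_parameters_with_momentun may differ in details, so treat it as a sketch rather than the exact code. The important part is the last line: the function returns both the updated parameters and the velocity dictionary v, so the caller must unpack two values (the Adam version likewise returns parameters, v and s, i.e. three values).

def update_parameters_with_momentun(parameters, grads, v, beta, learning_rate):
    # Momentum update: v is an exponentially weighted average of past
    # gradients, and each parameter is moved along its velocity.
    L = len(parameters) // 2   # number of layers
    for l in range(1, L + 1):
        v["dW" + str(l)] = beta * v["dW" + str(l)] + (1 - beta) * grads["dW" + str(l)]
        v["db" + str(l)] = beta * v["db" + str(l)] + (1 - beta) * grads["db" + str(l)]
        parameters["W" + str(l)] = parameters["W" + str(l)] - learning_rate * v["dW" + str(l)]
        parameters["b" + str(l)] = parameters["b" + str(l)] - learning_rate * v["db" + str(l)]
    return parameters, v   # two return values, so the call must be: parameters, v = ...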