Python Deep Learning Cookbook: 75 Solutions for Neural Network Modeling, Reinforcement Learning, and Transfer Learning

  • Chapter 1 Programs
    • page 8
    • page 11
    • page 12
    • page 16

Chapter 1 Programs

While working through Python Deep Learning Cookbook: 75 Solutions for Neural Network Modeling, Reinforcement Learning, and Transfer Learning, the example programs from the book are posted here.

page 8

import tensorflow as tf
import numpy as np

x_input = np.array([[1, 2, 3, 4, 5]])  # provide a dummy dataset
y_input = np.array([[10]])
x = tf.placeholder(tf.float32, [None, 5])  # create placeholders for the inputs and targets
y = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.zeros([5, 1]))  # variables that operate on the placeholders
b = tf.Variable(tf.zeros([1]))
y_pred = tf.matmul(x, W) + b
loss = tf.reduce_sum(tf.pow((y - y_pred), 2))  # define a loss function
train = tf.train.GradientDescentOptimizer(0.0001).minimize(loss)  # choose an optimizer and the value to minimize
init = tf.global_variables_initializer()  # an op named init that initializes all variables
sess = tf.Session()  # create a session and run the training data for 10 epochs
sess.run(init)
for i in range(10):
    feed_dict = {x: x_input, y: y_input}
    _, loss_value = sess.run([train, loss], feed_dict=feed_dict)
    print(loss_value)

Output:

100.0
97.77255
95.594696
93.46538
91.38347
89.34794
87.357765
85.41191
83.5094
81.64925
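
Note that this listing uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session), which is no longer in the default namespace in TensorFlow 2.x. A minimal sketch of running it unchanged under TF 2.x via the compatibility layer (assuming TensorFlow 2.x is installed):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore 1.x graph-mode semantics so placeholder/Session work
# ...the rest of the page 8 listing then runs as written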

page 11

from keras.models import Sequential
from keras.layers import Dense
import numpy as np

x_input = np.array([[1, 2, 3, 4, 5]])  # provide a dummy dataset
y_input = np.array([[10]])
model = Sequential()  # use a hidden layer with 32 units and an output layer with one unit
model.add(Dense(units=32, input_dim=x_input.shape[1]))
model.add(Dense(units=1))
# Compile the model, configuring settings such as the loss function, optimizer, and metrics
model.compile(loss='mse', optimizer='sgd',
              metrics=['accuracy'])
model.summary()  # easily print a summary of the model
history = model.fit(x_input, y_input, epochs=10, batch_size=32)  # train the model and store the results
pred = model.predict(x_input, batch_size=128)  # predict can be used after training


Output:

_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_7 (Dense)              (None, 32)                192       
_________________________________________________________________
dense_8 (Dense)              (None, 1)                 33        
=================================================================
Total params: 225
Trainable params: 225
Non-trainable params: 0
_________________________________________________________________
Epoch 1/10
1/1 [==============================] - 0s 137ms/step - loss: 128.1939 - acc: 0.0000e+00
Epoch 2/10
1/1 [==============================] - 0s 2ms/step - loss: 1510.0778 - acc: 0.0000e+00
Epoch 3/10
1/1 [==============================] - 0s 2ms/step - loss: 1042477.8750 - acc: 0.0000e+00
Epoch 4/10
1/1 [==============================] - 0s 2ms/step - loss: 559640412684288.0000 - acc: 0.0000e+00
Epoch 5/10
1/1 [==============================] - 0s 2ms/step - loss: inf - acc: 0.0000e+00
Epoch 6/10
1/1 [==============================] - 0s 2ms/step - loss: nan - acc: 0.0000e+00
Epoch 7/10
1/1 [==============================] - 0s 2ms/step - loss: nan - acc: 0.0000e+00
Epoch 8/10
1/1 [==============================] - 0s 1ms/step - loss: nan - acc: 0.0000e+00
Epoch 9/10
1/1 [==============================] - 0s 2ms/step - loss: nan - acc: 0.0000e+00
Epoch 10/10
1/1 [==============================] - 0s 1ms/step - loss: nan - acc: 0.0000e+00
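
The loss blows up to inf and then nan: plain SGD at its default learning rate diverges on this unscaled single-sample toy problem (and accuracy is meaningless for regression, hence the constant 0.0000e+00). A minimal fix, assuming the standalone keras package imported above, where SGD still accepts the lr argument, is a much smaller step size and a regression metric:

from keras import optimizers
model.compile(loss='mse',
              optimizer=optimizers.SGD(lr=1e-4),  # smaller step size to keep SGD stable
              metrics=['mse'])                    # mse is meaningful for regression, accuracy is not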

page 12

import torch
from torch.autograd import Variable  # used to wrap the random training data

batch_size = 32  # size of the random training data
input_shape = 5
output_shape = 10
# To enable the GPU, set the default tensor type as follows;
# this ensures that all computations use the attached GPU
torch.set_default_tensor_type('torch.cuda.FloatTensor')
x = Variable(torch.randn(batch_size, input_shape))
y = Variable(torch.randn(batch_size, output_shape), requires_grad=False)
# A simple network with one 32-unit hidden layer and an output layer;
# the .cuda() call makes sure the model runs on the GPU
model = torch.nn.Sequential(torch.nn.Linear(input_shape, 32),
                            torch.nn.Linear(32, output_shape)).cuda()
loss_function = torch.nn.MSELoss()  # define the MSE loss function
learning_rate = 0.001
# Train the model for 10 iterations
for i in range(10):
    y_pred = model(x)
    loss = loss_function(y_pred, y)
    print(loss.item())
    # zero the gradients
    model.zero_grad()
    loss.backward()

    # update the weights
    for param in model.parameters():
        param.data -= learning_rate * param.grad.data


Output:

1.1000885963439941
1.0996534824371338
1.0992190837860107
1.0987858772277832
1.0983537435531616
1.097922682762146
1.0974926948547363
1.0970635414123535
1.0966354608535767
1.0962083339691162
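
Variable and the manual parameter loop reflect the pre-0.4 PyTorch API. A minimal modern equivalent (a sketch, assuming PyTorch >= 0.4, where tensors track gradients directly and torch.optim handles the update):

import torch

model = torch.nn.Sequential(torch.nn.Linear(5, 32),
                            torch.nn.Linear(32, 10)).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
loss_function = torch.nn.MSELoss()
x = torch.randn(32, 5, device='cuda')   # random training data, created directly on the GPU
y = torch.randn(32, 10, device='cuda')
for i in range(10):
    optimizer.zero_grad()               # clear accumulated gradients
    loss = loss_function(model(x), y)
    loss.backward()                     # backpropagate
    optimizer.step()                    # update the weights
    print(loss.item())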

page 16

import mxnet as mx
import numpy as np

# Create some simple dummy data, assigned to the GPU and CPU respectively
x_input = mx.nd.empty((1, 5), mx.gpu())
x_input[:] = np.array([[1, 2, 3, 4, 5]], np.float32)
y_input = mx.nd.empty((1, 5), mx.cpu())
y_input[:] = np.array([[10, 15, 20, 22.5, 25]], np.float32)
# The data can easily be copied and adjusted
w_input = x_input  # w_input aliases x_input, so in-place ops below affect both
z_input = x_input.copyto(mx.cpu())  # z_input is an independent copy on the CPU
x_input += 1
w_input /= 2
z_input *= 2
print(x_input.asnumpy())  # print the results
print(w_input.asnumpy())
print(z_input.asnumpy())
batch_size = 1  # create an iterator
train_iter = mx.io.NDArrayIter(x_input, y_input, batch_size,
                               shuffle=True, data_name='input', label_name='target')
X = mx.sym.Variable('input')  # create symbols for the model
Y = mx.symbol.Variable('target')
fc1 = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=5)
lin_reg = mx.sym.LinearRegressionOutput(data=fc1, label=Y, name='lin_reg')
# The model needs to be defined before training starts
model = mx.mod.Module(symbol=lin_reg, data_names=['input'], label_names=['target'])
model.fit(train_iter, optimizer_params={'learning_rate': 0.01, 'momentum': 0.9}, num_epoch=100,
          batch_end_callback=mx.callback.Speedometer(batch_size, 2))
model.predict(train_iter).asnumpy()



Output:

[[1.  1.5 2.  2.5 3. ]]
[[1.  1.5 2.  2.5 3. ]]
[[ 2.  4.  6.  8. 10.]]

array([[10.046575, 15.069991, 20.093437, 22.605223, 25.116766]],
      dtype=float32)
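
The first two printed arrays are identical because w_input aliases x_input, while z_input is an independent CPU copy. After fitting, the learned parameters can be pulled out of the Module for inspection. A minimal sketch, assuming the fc1 layer name from the listing above (Module.get_params returns dicts of NDArrays keyed by parameter name):

arg_params, aux_params = model.get_params()  # dicts mapping parameter name -> NDArray
print(arg_params['fc1_weight'].asnumpy())    # learned weights of the fc1 layer
print(arg_params['fc1_bias'].asnumpy())      # learned bias of the fc1 layer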

