Example 2: Hand-Coding a Simple Neural Network

 

'''
Created on 2019-x-x

@author: xxx
'''
import numpy as np  
  
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)   # derivative needed for backpropagation

    return 1 / (1 + np.exp(-x))
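
# Aside (added sketch): nonlin's deriv branch assumes x is ALREADY the sigmoid
# output. For s(t) = 1/(1 + e^-t), s'(t) = s(t) * (1 - s(t)), so feeding a
# cached activation (l1 or l2 below) back in yields the gradient without
# recomputing the exponential. Quick check at t = 0: s(0) = 0.5 and
# s'(0) = 0.5 * (1 - 0.5) = 0.25.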
      
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])
print(X.shape)
                  
y = np.array([[0],
              [1],
              [1],
              [0]])
print(y.shape)
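# Aside: y is the XOR of the first two input columns, a pattern that is not
# linearly separable; that is why the hidden layer (w0, below) is needed at all.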
np.random.seed(1)  
  
# randomly initialize our weights with mean 0
w0 = 2 * np.random.random((3, 4)) - 1   # values between -1 and +1
w1 = 2 * np.random.random((4, 1)) - 1
print(w0)
print(w1)
print(w0.shape)
print(w1.shape)
  
for j in range(60000):   # range, since Python 3 has no xrange

    # forward pass (deriv defaults to False): X -> dot with w0 -> nonlin, layer by layer
    l0 = X
    l1 = nonlin(np.dot(l0, w0))
    l2 = nonlin(np.dot(l1, w1))

    l2_error = y - l2

    if (j % 10000) == 0:   # report the error every 10,000 iterations
        print("Error:" + str(np.mean(np.abs(l2_error))))
          
    # backpropagation starts here
    l2_delta = l2_error * nonlin(l2, deriv=True)   # elementwise multiply, not a matrix product
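    # Aside (added sketch): with squared error E = 0.5 * (y - l2)**2 summed
    # over samples, the gradient w.r.t. l2's pre-activation is
    # -(y - l2) * l2 * (1 - l2) = -l2_delta, so adding l1.T.dot(l2_delta) to w1
    # below is plain gradient descent with a learning rate of 1.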
  
     
    # distribute the l2 error back to l1 through the weights
    l1_error = l2_delta.dot(w1.T)

    l1_delta = l1_error * nonlin(l1, deriv=True)

    # update the weights; "+=" works because the error above was computed as "y - l2"
    w1 += l1.T.dot(l2_delta)
    w0 += l0.T.dot(l1_delta)
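
After training, the same forward pass doubles as inference. A minimal sketch (not part of the original code) that prints the trained network's outputs next to the targets:

# Sketch: evaluate the trained network (w0 and w1 come from the loop above)
l1 = nonlin(np.dot(X, w0))
l2 = nonlin(np.dot(l1, w1))
print(np.round(l2, 3))   # each row should be close to the matching row of y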
    

Result: you can watch the Error getting smaller and smaller:

(4, 3)
(4, 1)
[[-0.16595599  0.44064899 -0.99977125 -0.39533485]
 [-0.70648822 -0.81532281 -0.62747958 -0.30887855]
 [-0.20646505  0.07763347 -0.16161097  0.370439  ]]
[[-0.5910955 ]
 [ 0.75623487]
 [-0.94522481]
 [ 0.34093502]]
(3, 4)
(4, 1)
Error:0.496410031903
Error:0.00858452565325
Error:0.00578945986251
Error:0.00462917677677
Error:0.00395876528027
Error:0.00351012256786

 
