Note: this code is not my own original work; it is a summary I put together after studying other deep-learning video tutorials.

Overall idea:

1. Handwritten digit recognition is used as the worked example.

2. The rough idea (I am not fully clear on every detail) is that a deep neural network, given a large number of parameters, can to some degree model any regular function or other phenomenon.

3. Training means repeatedly pushing the predicted value toward the true value; when the difference is large, we adjust the weights and also the biases.

4. That is, cost(w, b) = |y - output|^2. The initial values of w and b are fairly random, so we start somewhere on the two sides of this parabola. To descend to the point O where the cost function is smallest, each step subtracts from x (that is, from w and b) a multiple of the slope, which is negative on the left branch and positive on the right, so the value keeps approaching O. A small runnable demo of this update follows the list.

[Figures: the cost function; the parabola; the update step.]

5. This method goes by the grand name of gradient descent.
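To make steps 4 and 5 concrete, here is a minimal sketch (my own illustration, not from the tutorials) of gradient descent on the one-dimensional parabola cost(x) = (x - 3)^2, whose slope is 2(x - 3); the name descend and the constants eta and steps are hypothetical:

def descend(x, eta=0.1, steps=25):
    """Slide x down cost(x) = (x - 3)**2 toward its minimum at x = 3."""
    for _ in range(steps):
        slope = 2 * (x - 3)  # derivative of the cost: negative left of O, positive right of O
        x = x - eta * slope  # the update from step 4: subtract a multiple of the slope
    return x

print(descend(-4.0))   # starts on the left branch, ends near 3
print(descend(10.0))   # starts on the right branch, ends near 3

Each step shrinks the distance to the minimum by a constant factor (here 1 - 2*eta = 0.8), which is exactly the "keep approaching O" behavior described above.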
Walking through the code (the complete code is at the end of the article):

1. Initialize the weights and biases with numpy.random.randn(m, n), which returns an m x n array of samples drawn from the standard normal distribution. zip(sizes[1:], sizes[:-1]) pairs each layer with the one before it, so each weight matrix has one row per neuron in the later layer and one column per neuron in the earlier layer.

self.weights = [np.random.randn(m, n) for m, n in zip(sizes[1:], sizes[:-1])]
self.biases = [np.random.randn(k, 1) for k in sizes[1:]]

2. At the start of each epoch, shuffle the training data:

random.shuffle(training_data)

3. Update the parameters from each mini-batch:

self.update_mini_batch(mini_batch, eta)

4. backprop computes the partial derivatives, i.e. the gradient term after the minus sign in the update rule w <- w - (eta/m) * dC/dw, not including the eta (learning rate) factor in front; the details are in the code:

delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())

5. Update the w and b values, with the code from the article:

self.weights = [w - (eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
self.biases = [b - (eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]

6. The remaining parts are fairly simple and carry English comments: the cost derivative, the sigmoid function, and so on.

The complete code:

# -*- coding: utf-8 -*-
# author: dragon
# date: 2018-7-21

import random

import numpy as np


class Network(object):

    def __init__(self, sizes):
        """Initialize the parameters: weights and biases.

        :param sizes: number of neurons in each layer, e.g. [784, 30, 10]
        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.weights = [np.random.randn(m, n)
                        for m, n in zip(sizes[1:], sizes[:-1])]
        self.biases = [np.random.randn(k, 1) for k in sizes[1:]]

    def feedforward(self, a):
        """Return the output of the network for input ``a``.

        :param a: the input column vector
        :return: the activation of the output layer
        """
        for w, b in zip(self.weights, self.biases):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the network with stochastic gradient descent.

        :param training_data: list of tuples (x, y)
        :param epochs: number of passes over the training data
        :param mini_batch_size: size of each mini-batch
        :param eta: learning rate
        :param test_data: if given, evaluate after each epoch
        :return: None
        """
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [training_data[k: k + mini_batch_size]
                            for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                # evaluate(test_data) reports how many test inputs are
                # classified correctly
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the weights and biases from one mini-batch.

        :param mini_batch: a list of tuples (x, y)
        :param eta: learning rate
        :return: None
        """
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        for x, y in mini_batch:
            # backprop returns the gradients for one example;
            # accumulate them over the whole mini-batch
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple (nabla_b, nabla_w): the gradient of the cost
        for a single training example (x, y)."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """The network's output is assumed to be the index of whichever
        neuron in the final layer has the highest activation.

        :param test_data: list of tuples (x, label)
        :return: the number of correct predictions
        """
        test_result = [(np.argmax(self.feedforward(x)), y)
                       for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_result)

    def cost_derivative(self, output_activation, y):
        """Return the derivative of the quadratic cost w.r.t. the output."""
        return (output_activation - y)


def sigmoid(z):
    """The sigmoid function."""
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))
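As a quick check that the class runs end to end, here is a hypothetical smoke test (not part of the original article) that trains on random vectors shaped like MNIST data; real training would substitute the actual 784-dimensional image vectors and digit labels, and accuracy on random data will naturally stay near chance:

import numpy as np

# assumes the Network class above is in scope (e.g. same file)
rng = np.random.default_rng(0)

def one_hot(digit):
    """10x1 column vector with a 1 at position digit, the format SGD expects for y."""
    y = np.zeros((10, 1))
    y[digit] = 1.0
    return y

# training_data: (x, y) pairs, x a 784x1 column vector, y a 10x1 one-hot vector
training_data = [(rng.random((784, 1)), one_hot(rng.integers(10)))
                 for _ in range(200)]
# test_data: (x, label) pairs where the label is a plain integer, as evaluate() expects
test_data = [(rng.random((784, 1)), int(rng.integers(10)))
             for _ in range(50)]

net = Network([784, 30, 10])
net.SGD(training_data, epochs=3, mini_batch_size=10, eta=3.0,
        test_data=test_data)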