The program below implements the classic BP (backpropagation) algorithm. Its main work is to feed the inputs forward through the network, propagate the output error back toward the hidden layer, and update the weights with the gradient-descent rule.
The program trains on the MNIST handwritten digit dataset; you are welcome to discuss it. (Click to download the dataset.)
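For reference, the forward pass and weight updates computed in train() below can be written as follows (a restatement of what the code does, with sigmoid σ, learning rate α, and ⊙ denoting the element-wise product):

\begin{aligned}
\mathbf{h} &= \sigma(W_{ih}\,\mathbf{x}), \qquad \mathbf{o} = \sigma(W_{ho}\,\mathbf{h}) \\
\mathbf{e}_o &= \mathbf{t} - \mathbf{o}, \qquad \mathbf{e}_h = W_{ho}^{T}\,\mathbf{e}_o \\
W_{ho} &\mathrel{+}= \alpha\,\big(\mathbf{e}_o \odot \mathbf{o} \odot (1-\mathbf{o})\big)\,\mathbf{h}^{T} \\
W_{ih} &\mathrel{+}= \alpha\,\big(\mathbf{e}_h \odot \mathbf{h} \odot (1-\mathbf{h})\big)\,\mathbf{x}^{T}
\end{aligned}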
import numpy
#sigmoid function:scipy.special.expit
import scipy.special
import matplotlib.pyplot
%matplotlib inline
class neuralNetwork:
    #initialise the neural network
    def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate):
        #set the numbers of nodes and the learning rate
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        #set weights, including weight_input_hidden and weight_hidden_output (random initialisation)
        self.wih = numpy.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.inodes))
        self.who = numpy.random.normal(0.0,pow(self.onodes,-0.5),(self.onodes,self.hnodes))
        #sigmoid activation function
        self.activation_function = lambda x: scipy.special.expit(x)
        pass
    def train(self,inputs_list,targets_list):
        #convert inputs list to 2d array
        inputs = numpy.array(inputs_list,ndmin=2).T
        targets = numpy.array(targets_list,ndmin=2).T
        #calculate signals into and out of the hidden layer
        hidden_inputs = numpy.dot(self.wih,inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        #calculate signals into and out of the output layer
        final_inputs = numpy.dot(self.who,hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        #output layer error is (target - actual)
        output_errors = targets - final_outputs
        #hidden layer error: hidden_errors = who.T * output_errors
        hidden_errors = numpy.dot(self.who.T,output_errors)
        #update the weights for the links between the hidden and output layers
        self.who += self.lr*numpy.dot((output_errors*final_outputs*(1.0-final_outputs)),numpy.transpose(hidden_outputs))
        #update the weights for the links between the input and hidden layers
        self.wih += self.lr*numpy.dot((hidden_errors*hidden_outputs*(1.0-hidden_outputs)),numpy.transpose(inputs))
        pass
    def query(self,inputs_list):
        #convert inputs list to 2d array
        inputs = numpy.array(inputs_list,ndmin=2).T
        #forward pass: hidden layer
        hidden_inputs = numpy.dot(self.wih,inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        #forward pass: output layer
        final_inputs = numpy.dot(self.who,hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
#create instance of neural network
n = neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate)
#load train data
training_data_file = open('mnist_dataset/mnist_train_100.csv','r')
training_data_list = training_data_file.readlines()
training_data_file.close()
#train the neural network
#number of training epochs
epochs = 5
for e in range(epochs):
    for record in training_data_list:
        #split the record by commas
        all_values = record.split(',')
        #scale the pixel values from 0-255 into the range 0.01-1.00
        inputs = (numpy.asfarray(all_values[1:])/255.0*0.99)+0.01
        #all target values are 0.01, except the desired label which is 0.99
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs,targets)
        pass
    pass
#load test data
test_data_file = open('mnist_dataset/mnist_test_10.csv','r')
test_data_list = test_data_file.readlines()
test_data_file.close()
#scorecard for how well the network performs, initially empty
scorecard = []
#test the neural network
for record in test_data_list:
    all_values = record.split(',')
    #the first value of each record is the correct label
    correct_label = int(all_values[0])
    #scale the pixel values as in training
    inputs = (numpy.asfarray(all_values[1:])/255.0*0.99)+0.01
    #query the network and take the index of the highest output as the predicted label
    outputs = n.query(inputs)
    label = numpy.argmax(outputs)
    print('correct_label = ',correct_label,'predict_label = ',label)
    if(label == correct_label):
        scorecard.append(1)
    else:
        scorecard.append(0)
        pass
    pass
scorecard_array = numpy.asarray(scorecard)
print("rate = ",scorecard_array.sum()/scorecard_array.size)
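matplotlib.pyplot is imported at the top but never used in the listing. As an optional sketch (my addition, not part of the original program), one test record could be rendered as a 28x28 greyscale image like this:

#optional sketch (assumption, not in the original post): visualise the first test record
image_values = test_data_list[0].split(',')
image_array = numpy.asfarray(image_values[1:]).reshape((28,28))
matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
matplotlib.pyplot.show()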
Please credit the original source if you repost this article!