Deep Learning: Implementing Fully Connected Layers from Scratch in Python

The backpropagation mechanism behind DNNs is covered in this post, so it is not re-derived here.
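
As a quick memory aid, these are the update rules the code below implements (sigmoid activations with a quadratic-style output error; z_l is the pre-activation of layer l, a_l = σ(z_l) its activation, y the one-hot label, and ⊙ the element-wise product):

    δ_L = (a_L − y) ⊙ σ′(z_L)
    δ_l = (W_{l+1}ᵀ δ_{l+1}) ⊙ σ′(z_l)
    ∂C/∂W_l = δ_l a_{l−1}ᵀ,   ∂C/∂b_l = δ_l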

Without further ado, here is the code:

"""
全连接的底层python实现,隐藏层层数以及每层的神经元数量自己定义。
 #author:AloneBird
 #time:2019.08.13
"""
import numpy as np

class Full_Dense():

    def __init__(self,input_data,label,mutil_classes,layer_size):
        """
        Initialise everything we need.
        :param input_data: training samples, one flat feature vector per sample
        :param label: one-hot training labels
        :param mutil_classes: True for multi-class output, False for binary classification
        :param layer_size: list of neuron counts, one entry per hidden layer
        """
        self.input_data = input_data
        self.label = np.array(label)
        # Prepend the input dimension so layer_size describes the whole network.
        layer_size.insert(0, len(self.input_data[0]))
        self.layer_size = layer_size
        # Append the output layer: 2 neurons for binary, one per class otherwise.
        if not mutil_classes:
            self.layer_size.append(2)
        else:
            self.layer_size.append(len(self.label[0]))
        print(self.layer_size)
        # One (j x i) weight matrix and (j x 1) bias vector per layer transition.
        self.weights = [np.random.randn(j,i) for i,j in zip(layer_size[:-1],layer_size[1:])]
        self.bias = [np.random.randn(j,1) for j in layer_size[1:]]

    def sigmoid(self,cell_data):
        """
        Sigmoid activation function.
        :param cell_data: pre-activation value
        :return: activated value
        """
        return 1.0/(1.0+np.exp(-cell_data))

    def sigmoid_prime(self,cell_data):
        """
        Derivative of the sigmoid, used when computing the intermediate layer errors.
        :param cell_data: pre-activation value
        :return: derivative of the activation function at that value
        """
        return self.sigmoid(cell_data)*(1-self.sigmoid(cell_data))

    def Forward(self,input_data):
        """
        Forward propagation.
        :param input_data: one input sample
        :return: the list of every layer's pre-activation values under the current weights
        """
        N_activedata = np.array(input_data).reshape(len(input_data),1)
        N_activegraph = []
        N_activegraph.append(N_activedata)
        for i in range(len(self.weights)):
            N_activedata = self.sigmoid(N_activedata)  # activate the previous layer (the raw input on the first pass)
            N_activedata = np.dot(self.weights[i],N_activedata)+self.bias[i]
            N_activegraph.append(N_activedata)         # build a graph storing the pre-activation values
        return N_activegraph

    def Back(self,input_data,label,batch_size,learn_rate):
        """
        Backpropagation over one mini-batch: accumulate each sample's gradients,
        then apply a single averaged gradient-descent step.
        """
        nabla_w = [np.zeros(w.shape) for w in self.weights]  # gradient accumulators
        nabla_b = [np.zeros(b.shape) for b in self.bias]
        for num, train_data in enumerate(input_data):
            N_activegraph = self.Forward(train_data)
            # Output-layer error: prediction minus one-hot label.
            final_loss = self.sigmoid(N_activegraph[-1]) - np.array(label[num]).reshape(len(label[num]),1)
            delta_loss = [np.zeros((i,1)) for i in self.layer_size[1:]]  # per-sample layer errors
            for i in range(len(N_activegraph)-1):
                layer_index = -(i + 1)  # walk from the output layer back to the first hidden layer
                if i == 0:
                    delta_loss[layer_index] = final_loss * self.sigmoid_prime(N_activegraph[layer_index])
                else:
                    # delta_l = (W_{l+1}^T delta_{l+1}) * sigmoid'(z_l)
                    delta_loss[layer_index] = np.dot(self.weights[layer_index+1].T,delta_loss[layer_index+1])*self.sigmoid_prime(N_activegraph[layer_index])
            # graph[index] holds the previous layer's pre-activation, so activate it first.
            for index in range(len(self.weights)):
                nabla_w[index] += np.dot(delta_loss[index], self.sigmoid(N_activegraph[index]).T)
                nabla_b[index] += delta_loss[index]
        # Averaged update over the mini-batch.
        for index in range(len(self.weights)):
            self.weights[index] = self.weights[index] - learn_rate/batch_size*nabla_w[index]
            self.bias[index] = self.bias[index] - learn_rate/batch_size*nabla_b[index]

    def fit(self,test_data,test_label,learn_rate,epochs,batch_size):
        """
        Train with mini-batch gradient descent and report test accuracy after each epoch.
        """
        for epoch in range(epochs):
            for k in range(0,len(self.input_data),batch_size):
                train_datas = self.input_data[k:k+batch_size]
                train_labels = self.label[k:k+batch_size]
                self.Back(train_datas,train_labels,batch_size,learn_rate)
            # Evaluate once per epoch; evaluating after every mini-batch is needlessly slow.
            print("Epoch{0}: accuracy is {1}/{2}".format(epoch + 1, self.evaluate(test_data,test_label), len(test_data)))

    def evaluate(self,test_data,test_label):
        """
        Evaluation helper.
        :param test_data: test samples
        :param test_label: test labels (one-hot)
        :return: number of correct predictions
        """
        result = 0
        for data,label in zip(test_data, test_label):
            predict_label = self.sigmoid(self.Forward(data)[-1])
            if np.argmax(predict_label) == np.argmax(label):
                result += 1
        return result
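
To wrap up, here is a minimal usage sketch. It is not from the original post: the data, shapes, and hyperparameters below are made up for illustration, with random features and random one-hot labels standing in for a real dataset (and a slice of the training data reused as the test set, purely for demonstration).

# Minimal usage sketch -- hypothetical data, assuming the Full_Dense class above.
import numpy as np

np.random.seed(0)
n_samples, n_features, n_classes = 200, 4, 3

# Random features and random one-hot labels stand in for a real dataset.
X = np.random.rand(n_samples, n_features)
y = np.eye(n_classes)[np.random.randint(0, n_classes, n_samples)]

# Two hidden layers (8 and 6 neurons); __init__ prepends the input size (4)
# and appends the output size (3), so the full layout becomes [4, 8, 6, 3].
net = Full_Dense(X, y, mutil_classes=True, layer_size=[8, 6])
net.fit(test_data=X[:50], test_label=y[:50], learn_rate=0.5, epochs=5, batch_size=10)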
