BP Neural Network
import numpy as np

def loaddataset(filename):
    fp = open(filename)
    # feature vectors
    dataset = []
    # labels
    labelset = []
    for i in fp.readlines():
        a = i.strip().split()
        # the last field of each line is the label
        dataset.append([float(j) for j in a[:len(a) - 1]])
        labelset.append(int(float(a[-1])))
    fp.close()
    return dataset, labelset
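loaddataset assumes a plain-text file with one sample per line: whitespace-separated feature values followed by the class label in the last column. A tiny sketch with made-up values (the file name demo_data.txt and the numbers are hypothetical):
sample_lines = [
    "1.2 0.7 3.4 1",  # three illustrative feature values, label 1
    "0.5 2.1 0.9 0",
]
with open("demo_data.txt", "w") as f:
    f.write("\n".join(sample_lines))
dataset, labelset = loaddataset("demo_data.txt")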
# x: number of input-layer neurons, y: number of hidden-layer neurons, z: number of output-layer neurons
def parameter_initialization(x, y, z):
    # hidden-layer thresholds
    value1 = np.random.randint(-5, 5, (1, y)).astype(np.float64)
    # output-layer thresholds
    value2 = np.random.randint(-5, 5, (1, z)).astype(np.float64)
    # connection weights between the input layer and the hidden layer
    weight1 = np.random.randint(-5, 5, (x, y)).astype(np.float64)
    # connection weights between the hidden layer and the output layer
    weight2 = np.random.randint(-5, 5, (y, z)).astype(np.float64)
    return weight1, weight2, value1, value2
# sigmoid activation function
def sigmoid(z):
    #********** Begin **********#
    return 1 / (1 + np.exp(-z))
    #********** End **********#

# weight1: connection weights between the input layer and the hidden layer
# weight2: connection weights between the hidden layer and the output layer
# value1:  hidden-layer thresholds
# value2:  output-layer thresholds
def trainning(dataset, labelset, weight1, weight2, value1, value2):
    # dataset: feature vectors, labelset: labels
    # x is the learning rate (step size)
    x = 0.01
    for i in range(len(dataset)):
        # input sample
        inputset = np.mat(dataset[i]).astype(np.float64)
        # its label
        outputset = np.mat(labelset[i]).astype(np.float64)
        # hidden-layer input
        input1 = np.dot(inputset, weight1).astype(np.float64)
        # hidden-layer output
        #********** Begin **********#
        output2 = sigmoid(input1 - value1).astype(np.float64)
        #********** End **********#
        # output-layer input
        input2 = np.dot(output2, weight2).astype(np.float64)
        # output-layer output
        output3 = sigmoid(input2 - value2).astype(np.float64)
        # the update quantities, written as matrix operations
        a = np.multiply(output3, 1 - output3)
        g = np.multiply(a, outputset - output3)
        b = np.dot(g, np.transpose(weight2))
        c = np.multiply(output2, 1 - output2)
        e = np.multiply(b, c)
        value1_change = -x * e
        value2_change = -x * g
        weight1_change = x * np.dot(np.transpose(inputset), e)
        weight2_change = x * np.dot(np.transpose(output2), g)
        # update the connection weights and thresholds value1, value2, weight1, weight2
        #********** Begin **********#
        value1 += value1_change
        value2 += value2_change
        weight1 += weight1_change
        weight2 += weight2_change
        #********** End **********#
    return weight1, weight2, value1, value2
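For reference, the matrix expressions above are the standard error back-propagation updates. Writing the learning rate as \eta (x in the code), the hidden output as b_h (output2), the prediction as \hat{y}_j (output3) and the target as y_j, with v corresponding to weight1, w to weight2, \gamma to value1 and \theta to value2:

g_j = \hat{y}_j (1 - \hat{y}_j)(y_j - \hat{y}_j)
e_h = b_h (1 - b_h) \sum_j w_{hj} g_j
\Delta w_{hj} = \eta\, b_h g_j, \quad \Delta\theta_j = -\eta\, g_j
\Delta v_{ih} = \eta\, x_i e_h, \quad \Delta\gamma_h = -\eta\, e_h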
def testing(dataset, labelset, weight1, weight2, value1, value2):
    # number of correct predictions
    rightcount = 0
    for i in range(len(dataset)):
        # compute the network's prediction for each sample
        inputset = np.mat(dataset[i]).astype(np.float64)
        outputset = np.mat(labelset[i]).astype(np.float64)
        output2 = sigmoid(np.dot(inputset, weight1) - value1)
        output3 = sigmoid(np.dot(output2, weight2) - value2)
        # predicted label: set flag to 1 if the output is greater than 0.5, otherwise 0
        #********** Begin **********#
        if output3 > 0.5:
            flag = 1
        else:
            flag = 0
        #********** End **********#
        if labelset[i] == flag:
            rightcount += 1
    # return the accuracy
    return rightcount / len(dataset)
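A minimal end-to-end sketch using the hypothetical demo_data.txt from above; the hidden-layer size of 6, the single output unit and the 50 epochs are arbitrary choices:
if __name__ == "__main__":
    dataset, labelset = loaddataset("demo_data.txt")
    weight1, weight2, value1, value2 = parameter_initialization(len(dataset[0]), 6, 1)
    # trainning makes one pass over the data per call, so loop for several epochs
    for epoch in range(50):
        weight1, weight2, value1, value2 = trainning(dataset, labelset,
                                                     weight1, weight2, value1, value2)
    print("accuracy:", testing(dataset, labelset, weight1, weight2, value1, value2))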
Hopfield Neural Network
import numpy as np
import random
from random import randint
from matplotlib import pyplot as plt

# compute the connection weights between neurons with the Hebb learning rule
def calcWeight(savedsample):
    N = len(savedsample[0])   # number of neurons
    P = len(savedsample)      # number of stored patterns
    mat = [0] * N
    returnMat = []
    for i in range(N):
        m = mat[:]
        returnMat.append(m)
    for i in range(N):
        for j in range(N):
            if i == j:
                continue
            sum = 0
            for u in range(P):
                sum += savedsample[u][i] * savedsample[u][j]
            returnMat[i][j] = sum / float(N)
    return returnMat
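The loop above implements the Hebb rule for P stored patterns x^{(u)} over N neurons, with self-connections set to zero:

w_{ij} = \frac{1}{N} \sum_{u=1}^{P} x_i^{(u)} x_j^{(u)} \quad (i \ne j), \qquad w_{ii} = 0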
# compute each neuron's output from its input (static synapses)
def calcXi(inMat, weighMat):
    # inMat is the network state after iteration t-1, i.e. the previous output is used as this iteration's input
    # weighMat is the weight matrix
    returnMat = inMat  # returnMat aliases inMat, so the state is updated in place
    choose = []
    # randomly pick N/5 neurons to update; this number is tunable, and all neurons could be updated at once
    for i in range(len(inMat) // 5):
        choose.append(random.randint(0, len(inMat) - 1))
    for i in choose:
        sum = 0
        # network dynamics: each chosen neuron takes the sign of its weighted input
        #********** Begin **********#
        for j in range(len(inMat)):
            sum += weighMat[i][j] * inMat[j]
        if sum >= 0:
            returnMat[i] = 1
        else:
            returnMat[i] = -1
        #********** End **********#
    return returnMat
# noise function: flip roughly 30% of the entries of a stored pattern
def addnoise(mytest_data, n):
    # mytest_data: the pattern as a flat list, n: number of rows/columns of the square pattern
    #********** Begin **********#
    for x in range(n):
        for y in range(n):
            # flip each entry with probability 3/11 (about 30%)
            if random.randint(0, 10) > 7:
                mytest_data[x * n + y] = -mytest_data[x * n + y]
    #********** End **********#
    return mytest_data
# pretty-print function: data is the pattern recalled by the network, N is the number of rows of the square pattern
def regularout(data, N):
    for j in range(N):
        ch = ""
        for i in range(N):
            # print "X" when the element is 1, a space otherwise
            #********** Begin **********#
            ch += " " if data[j * N + i] == -1 else "X"
            #********** End **********#
        print(ch)
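A minimal recall sketch tying the four functions together; the 5x5 vertical-bar pattern, the copy of it used as the noisy probe, and the 200 update rounds are illustrative assumptions:
if __name__ == "__main__":
    N = 5
    # an arbitrary 5x5 pattern of +1/-1 values (a vertical bar), stored as a flat list
    pattern = [1 if i % N == 2 else -1 for i in range(N * N)]
    weights = calcWeight([pattern])   # memorize one pattern
    probe = addnoise(pattern[:], N)   # corrupt a copy of it
    for _ in range(200):              # let the asynchronous dynamics relax
        probe = calcXi(probe, weights)
    regularout(probe, N)              # ideally the vertical bar is recovered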
2D Convolution
import numpy as np

def my_conv(inputmatrix, kernel):
    # "valid" convolution: the output is (n - k + 1) x (n - k + 1)
    output_size = len(inputmatrix) - len(kernel) + 1
    res = np.zeros([output_size, output_size], np.float32)
    for i in range(len(res)):
        for j in range(len(res)):
            res[i][j] = compute_conv(inputmatrix, kernel, i, j)
    return res
# convolution of the kernel with one window of the input matrix
def compute_conv(inputmatrix, kernel, i, j):
    # inputmatrix: input matrix, kernel: convolution kernel,
    # i, j: position of the top-left element of the current window
    res = 0
    #********** Begin **********#
    for kk in range(len(kernel)):
        for k in range(len(kernel)):
            # key step: elementwise product of the window and the kernel, accumulated into res
            res += inputmatrix[i + kk][j + k] * kernel[kk][k]
    #********** End **********#
    return res
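A quick check of my_conv on a small input; the 4x4 matrix and 3x3 kernel below are arbitrary illustrations:
if __name__ == "__main__":
    inputmatrix = [[1, 2, 3, 0],
                   [0, 1, 2, 3],
                   [3, 0, 1, 2],
                   [2, 3, 0, 1]]
    kernel = [[1, 0, 1],
              [0, 1, 0],
              [1, 0, 1]]
    # a 4x4 input with a 3x3 kernel yields a 2x2 output
    print(my_conv(inputmatrix, kernel))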