Building a BP Neural Network

1. Dataset download — the MNIST IDX files (a small sketch for checking the downloaded files follows this list)
2. C++ code
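The Train() and Test() functions below assume the standard MNIST IDX layout: the image files start with a 16-byte header (magic number, image count, rows, columns) and the label files with an 8-byte header (magic number, label count), all stored as big-endian integers. Below is a minimal, optional sketch for printing those headers to verify the downloads; the file names match those used later in the code, and ReadBigEndian is a helper introduced only for this sketch.

#include <cstdio>

// Helper introduced for this sketch: read a 4-byte big-endian integer from a file
static unsigned int ReadBigEndian(FILE *f)
{
	unsigned char b[4] = { 0, 0, 0, 0 };
	fread(b, 1, 4, f);
	return ((unsigned int)b[0] << 24) | ((unsigned int)b[1] << 16) | ((unsigned int)b[2] << 8) | (unsigned int)b[3];
}

int main()
{
	FILE *images = fopen("train-images.idx3-ubyte", "rb");
	FILE *labels = fopen("train-labels.idx1-ubyte", "rb");
	if (!images || !labels)
	{
		perror("Failed to open MNIST files");
		return 1;
	}
	// Image file header: magic number (2051), image count, rows, columns
	unsigned int magic = ReadBigEndian(images);
	unsigned int count = ReadBigEndian(images);
	unsigned int rows = ReadBigEndian(images);
	unsigned int cols = ReadBigEndian(images);
	printf("images: magic=%u count=%u rows=%u cols=%u\n", magic, count, rows, cols);
	// Label file header: magic number (2049), label count
	unsigned int label_magic = ReadBigEndian(labels);
	unsigned int label_count = ReadBigEndian(labels);
	printf("labels: magic=%u count=%u\n", label_magic, label_count);
	fclose(images);
	fclose(labels);
	return 0;
}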
BP.h

#ifndef BP_H_INCLUDED
#define BP_H_INCLUDED
const int INPUT_LAYER = 784;   // Input layer dimension (28x28 pixels)
const int HIDDEN_LAYER = 40;   // Hidden layer dimension
const int OUTPUT_LAYER = 10;   // Output layer dimension (digits 0-9)
const double LEARN_RATE = 0.3; // Learning rate
const int TRAIN_TIMES = 10;    // Number of training epochs
class BP
{
private:
    int input_array[INPUT_LAYER]; //Input vector
    int aim_array[OUTPUT_LAYER]; //Target (one-hot) output
    double weight1_array[INPUT_LAYER][HIDDEN_LAYER]; //Weights between input and hidden layers
    double weight2_array[HIDDEN_LAYER][OUTPUT_LAYER]; //Weights between hidden and output layers
    double output1_array[HIDDEN_LAYER]; //Hidden layer output
    double output2_array[OUTPUT_LAYER]; //Output layer output
    double deviation1_array[HIDDEN_LAYER]; //Hidden layer error term
    double deviation2_array[OUTPUT_LAYER]; //Output layer error term
    double threshold1_array[HIDDEN_LAYER]; //Hidden layer bias (threshold)
    double threshold2_array[OUTPUT_LAYER]; //Output layer bias (threshold)
public:
    void Init(); //Initialize all parameters
    double Sigmoid(double x); //Sigmoid activation function
    void GetOutput1(); //Compute hidden layer output
    void GetOutput2(); //Compute output layer output
    void GetDeviation1(); //Compute hidden layer error
    void GetDeviation2(); //Compute output layer error
    void Feedback1(); //Update weights between input and hidden layers
    void Feedback2(); //Update weights between hidden and output layers
    void Train(); //Train
    void Test(); //Test
};
#endif // BP_H_INCLUDED
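For reference, the error terms and weight updates that GetDeviation1/GetDeviation2 and Feedback1/Feedback2 implement in BP.cpp below are the standard backpropagation rules for a sigmoid network with a squared-error loss. Writing x_k for the binarized input, h_i for the hidden output, y_j for the network output, t_j for the one-hot target and \eta for the learning rate:

\delta^{out}_j = y_j (1 - y_j)(y_j - t_j)
\delta^{hid}_i = h_i (1 - h_i) \sum_j w^{(2)}_{ij} \, \delta^{out}_j
w^{(2)}_{ij} \leftarrow w^{(2)}_{ij} - \eta \, h_i \, \delta^{out}_j
w^{(1)}_{ki} \leftarrow w^{(1)}_{ki} - \eta \, x_k \, \delta^{hid}_i

The biases (threshold1_array and threshold2_array) are updated in the same way, with the corresponding input replaced by 1.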

BP.cpp

#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <ctime>
#include <chrono>
#include "BP.h"
using namespace std;
//Initialize all parameters: weights and biases are set to random values in [-1, 1]
void BP::Init()
{
	srand(time(NULL));
	for (int i = 0; i < INPUT_LAYER; i++)
		for (int j = 0; j < HIDDEN_LAYER; j++)
			weight1_array[i][j] = rand() / (double)(RAND_MAX) * 2 - 1;
	for (int i = 0; i < HIDDEN_LAYER; i++)
		for (int j = 0; j < OUTPUT_LAYER; j++)
			weight2_array[i][j] = rand() / (double)(RAND_MAX) * 2 - 1;
	for (int i = 0; i < HIDDEN_LAYER; i++)
		threshold1_array[i] = rand() / (double)(RAND_MAX) * 2 - 1;
	for (int i = 0; i < OUTPUT_LAYER; i++)
		threshold2_array[i] = rand() / (double)(RAND_MAX) * 2 - 1;
}
//Sigmoid activation function
double BP::Sigmoid(double x)
{
	return 1.0 / (1.0 + exp(-x));
}
//Compute hidden layer output
void BP::GetOutput1()
{
	for (int j = 0; j < HIDDEN_LAYER; j++)
	{
		double total = threshold1_array[j];
		for (int i = 0; i < INPUT_LAYER; i++)
			total += input_array[i] * weight1_array[i][j];
		output1_array[j] = Sigmoid(total);
	}
}
//Compute output layer output
void BP::GetOutput2()
{
	for (int j = 0; j < OUTPUT_LAYER; j++)
	{
		double total = threshold2_array[j];
		for (int i = 0; i < HIDDEN_LAYER; i++)
			total += output1_array[i] * weight2_array[i][j];
		output2_array[j] = Sigmoid(total);
	}
}
//Compute hidden layer error
void BP::GetDeviation1()
{
	for (int i = 0; i < HIDDEN_LAYER; i++)
	{
		double total = 0;
		for (int j = 0; j < OUTPUT_LAYER; j++)
			total += weight2_array[i][j] * deviation2_array[j];
		deviation1_array[i] = (output1_array[i]) * (1.0 - output1_array[i]) * total;
	}
}
//Compute output layer error
void BP::GetDeviation2()
{
	for (int i = 0; i < OUTPUT_LAYER; i++)
		deviation2_array[i] = (output2_array[i]) * (1.0 - output2_array[i]) * (output2_array[i] - aim_array[i]);
}
//Update weights between input and hidden layers
void BP::Feedback1()
{
	for (int j = 0; j < HIDDEN_LAYER; j++)
	{
		threshold1_array[j] -= LEARN_RATE * deviation1_array[j];
		for (int i = 0; i < INPUT_LAYER; i++)
			weight1_array[i][j] = weight1_array[i][j] - LEARN_RATE * input_array[i] * deviation1_array[j];
	}
}
//Update weights between hidden and output layers
void BP::Feedback2()
{
	for (int j = 0; j < OUTPUT_LAYER; j++)
	{
		threshold2_array[j] = threshold2_array[j] - LEARN_RATE * deviation2_array[j];
		for (int i = 0; i < HIDDEN_LAYER; i++)
			weight2_array[i][j] = weight2_array[i][j] - LEARN_RATE * output1_array[i] * deviation2_array[j];
	}
}
//Train
void BP::Train()
{
	FILE *train_images;
	FILE *train_labels;
	errno_t err1, err2;

	err1 = fopen_s(&train_images, "train-images.idx3-ubyte", "rb");
	if (err1 != 0) {
		perror("Failed to open train-images.idx3-ubyte");
		return; // or handle the error in another way
	}

	err2 = fopen_s(&train_labels, "train-labels.idx1-ubyte", "rb");
	if (err2 != 0) {
		perror("Failed to open train-labels.idx1-ubyte");
		fclose(train_images); // close the image file
		return; // or handle the error in another way
	}

	unsigned char image[INPUT_LAYER];
	unsigned char label[OUTPUT_LAYER];
	unsigned char temp[100];
	// Skip the IDX file headers: 16 bytes for the image file (magic, count, rows, cols),
	// 8 bytes for the label file (magic, count)
	fread(temp, 1, 16, train_images);
	fread(temp, 1, 8, train_labels);
	int times = 0; // number of samples trained so far
	cout << "Training started..." << endl << endl;

	

	// Read one image and its label per iteration; checking the fread return values avoids
	// processing a stale last sample (feof is only set after a read fails)
	while (fread(image, 1, INPUT_LAYER, train_images) == (size_t)INPUT_LAYER
		&& fread(label, 1, 1, train_labels) == 1)
	{
		// Set the input vector: binarize each pixel (values below 64 become 0, otherwise 1)
		for (int i = 0; i < INPUT_LAYER; i++)
		{
			if ((unsigned int)image[i] < 64)
				input_array[i] = 0;
			else
				input_array[i] = 1;
		}
		// Set the target value (one-hot encoding of the label)
		int index = (unsigned int)label[0];
		memset(aim_array, 0, sizeof(aim_array));
		aim_array[index] = 1;
		GetOutput1(); // hidden layer output
		GetOutput2(); // output layer output
		GetDeviation2(); // output layer error
		GetDeviation1(); // hidden layer error
		Feedback1(); // update weights between input and hidden layers
		Feedback2(); // update weights between hidden and output layers
		++times;
		if (times % 2000 == 0)
			cout << "Trained " << times << " samples" << endl;
		if (times % 10000 == 0) // run a test every 10000 samples
			Test();
	}

	
	// Close the files
	fclose(train_images);
	fclose(train_labels);
}
//Test
void BP::Test()
{
	FILE *test_images;
	FILE *test_labels;
	errno_t err1, err2;

	err1 = fopen_s(&test_images, "t10k-images.idx3-ubyte", "rb");
	if (err1 != 0) {
		perror("Failed to open t10k-images.idx3-ubyte");
		return; // or handle the error in another way
	}

	err2 = fopen_s(&test_labels, "t10k-labels.idx1-ubyte", "rb");
	if (err2 != 0) {
		perror("Failed to open t10k-labels.idx1-ubyte");
		fclose(test_images); // close the image file
		return; // or handle the error in another way
	}

	unsigned char image[INPUT_LAYER];
	unsigned char label[OUTPUT_LAYER];
	unsigned char temp[100];
	// Skip the IDX file headers (16 bytes for images, 8 bytes for labels)
	fread(temp, 1, 16, test_images);
	fread(temp, 1, 8, test_labels);
	int total_times = 0;   // number of samples tested so far
	int success_times = 0; // number of correct predictions so far
	cout << "Testing started..." << endl;
	

	// Check the fread return values so the loop stops exactly at end-of-file
	while (fread(image, 1, INPUT_LAYER, test_images) == (size_t)INPUT_LAYER
		&& fread(label, 1, 1, test_labels) == 1)
	{
		// Set the input vector: binarize each pixel (values below 64 become 0, otherwise 1)
		for (int i = 0; i < INPUT_LAYER; i++)
		{
			if ((unsigned int)image[i] < 64)
				input_array[i] = 0;
			else
				input_array[i] = 1;
		}
		// Set the target value (one-hot encoding of the label)
		memset(aim_array, 0, sizeof(aim_array));
		int index = (unsigned int)label[0];
		aim_array[index] = 1;
		GetOutput1(); // hidden layer output
		GetOutput2(); // output layer output
		// Use the digit with the largest output value as the prediction
		double maxn = -99999999;
		int max_index = 0;
		for (int i = 0; i < OUTPUT_LAYER; i++)
		{
			if (output2_array[i] > maxn)
			{
				maxn = output2_array[i];
				max_index = i;
			}
		}
		// If the prediction is correct
		if (aim_array[max_index] == 1)
			++success_times;
		++total_times;
		if (total_times % 2000 == 0)
			cout << "已测试:" << total_times << "组" << endl;
	}
	


	cout << "正确率: " << 100.0 * success_times / total_times << "%" << endl << endl;
	cout << "*************************" << endl << endl;

	// Close the files
	fclose(test_images);
	fclose(test_labels);
}
int main(int argc, char * argv[])
{
	// Start the timer
	auto start_time = std::chrono::high_resolution_clock::now();
	BP bp;
	bp.Init();
	// Reuse the training data for TRAIN_TIMES epochs
	for (int i = 0; i < TRAIN_TIMES; i++)
	{
		cout << "开始第" << i + 1 << "轮迭代" << endl << endl;
		bp.Train();
	}
	// Stop the timer
	auto end_time = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
	std::cout << "总时间: " << duration.count() << " milliseconds" << std::endl;
	return 0;
}

Screenshot of the program running

To measure the time taken for 10 training epochs, a timer based on std::chrono::high_resolution_clock is added to main().
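As a side note, the same pattern can be wrapped in a small helper that reports seconds, matching the units in the table below. TimeInSeconds is a hypothetical helper introduced only for this sketch and is not part of the original program; the workload timed here is a placeholder loop.

#include <chrono>
#include <iostream>

// Hypothetical helper: times an arbitrary callable and returns the elapsed seconds
template <typename Func>
double TimeInSeconds(Func &&work)
{
	auto start = std::chrono::high_resolution_clock::now();
	work();
	auto end = std::chrono::high_resolution_clock::now();
	return std::chrono::duration<double>(end - start).count(); // duration<double> defaults to seconds
}

int main()
{
	// Placeholder workload; in the program above this would be the TRAIN_TIMES training loop,
	// e.g. TimeInSeconds([&]() { for (int i = 0; i < TRAIN_TIMES; i++) bp.Train(); })
	volatile double sink = 0;
	double seconds = TimeInSeconds([&]() { for (int i = 0; i < 10000000; i++) sink = sink + i; });
	std::cout << "Total time: " << seconds << " s" << std::endl;
	return 0;
}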

Learning rate    Accuracy    Time
0.1              94.48%      717.224 s
0.2              95.02%      869.209 s
0.3              95.15%      725.855 s

As the learning rate increases, the gain in accuracy gradually becomes smaller, and the program's running time grows correspondingly.
