A simple two-hidden-layer BP neural network for pattern recognition in Java (recognizing diamonds, circles, rectangles and isosceles triangles)

Thanks go to Zhou Zhihua's book Machine Learning and to various online resources, which let me just barely understand how a simple artificial neural network is implemented; I clearly still have a lot to learn in this area.

First, the implementation of the BP neural network itself:

package com.test.bpNeuronNetworkVer2;

/**
 * Created by cjz on 2018/12/19.
 */

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Random;

public class BPDualLayer implements Serializable{
	
    private static final long serialVersionUID = 1L;
    
    public final double[] input;    //input layer

    public final double[] hiddenLayer1;   //hidden layer 1
    
    public final double[] hiddenLayer2;   //hidden layer 2

    public final double[] output;  //output layer

    public final double[] target;   //expected (target) output


    /**error (delta) of every neuron in hidden layer 1 / hidden layer 2**/
    public final double[] hidDeltaLayer1;  
    public final double[] hidDeltaLayer2;  

    /**optDeltaLayer2 holds the error (delta) of every output neuron;
       optDeltaLayer1 mirrors the hidden-layer-2 delta and is what gets propagated back to hidden layer 1**/
    public final double[] optDeltaLayer1;  
    public final double[] optDeltaLayer2;  


    public final double eta;     //learning rate

    /**momentum coefficient**/
    public final double momentum;   

    /**weight matrix from the input layer to hidden layer 1**/
    public final double[][] iptToHidLayer1Weights; 
    /**weight matrix from hidden layer 1 to hidden layer 2**/
    public final double[][] HidLayer1ToHidLayer2Weights;    
    /**weight matrix from hidden layer 2 to the output layer**/
    public final double[][] hidLayer2ToOptWeights;    

    /**previous weight update (input layer to hidden layer 1), kept for the momentum term**/
    public final double[][] iptToHid1PrevUptWeights;  
    /**previous weight update (hidden layer 1 to hidden layer 2), kept for the momentum term**/
    public final double[][] hid1ToHid2PrevUptWeights;  
    /**previous weight update (hidden layer 2 to output layer), kept for the momentum term**/
    public final double[][] hid2ToOptPrevUptWeights;  

    
    
    public double optErrSum = 0d;

    public double hidLayer1ErrSum = 0d;
    public double hidLayer2ErrSum = 0d;

    private final Random random;  //used for the random initialization of the weights


    //constructor: allocate all layers, deltas and weight matrices
    public BPDualLayer(int inputSize, int hiddenSize, int outputSize, double eta,
              double momentum) {

        input = new double[inputSize];
        hiddenLayer1 = new double[hiddenSize];
        hiddenLayer2 = new double[hiddenSize];
        output = new double[outputSize];
        target = new double[outputSize];

        hidDeltaLayer1 = new double[hiddenSize];
        hidDeltaLayer2 = new double[hiddenSize];
        
        optDeltaLayer1 = new double[hiddenSize];
        optDeltaLayer2 = new double[outputSize];

        iptToHidLayer1Weights = new double[inputSize][hiddenSize];
        HidLayer1ToHidLayer2Weights = new double[hiddenSize][hiddenSize];
        hidLayer2ToOptWeights = new double[hiddenSize][outputSize];

        random = new Random(100000);   //fixed seed, so every run starts from the same random weights (this is the difference from the no-argument Random())
        randomizeWeights(iptToHidLayer1Weights);  //initialize the input-layer -> hidden-layer-1 weights
        randomizeWeights(HidLayer1ToHidLayer2Weights);  //initialize the hidden-layer-1 -> hidden-layer-2 weights
        randomizeWeights(hidLayer2ToOptWeights);  //initialize the hidden-layer-2 -> output-layer weights

        iptToHid1PrevUptWeights = new double[inputSize][hiddenSize];  //previous updates, used by the momentum term
        hid1ToHid2PrevUptWeights = new double[hiddenSize][hiddenSize];  //previous updates, used by the momentum term
        hid2ToOptPrevUptWeights = new double[hiddenSize][outputSize];

        this.eta = eta;  			//learning rate
        this.momentum = momentum;   //momentum
    }
    
    //convenience constructor: only the layer sizes are given; eta defaults to 0.25 and momentum to 0.9
    public BPDualLayer(int inputSize, int hiddenSize, int outputSize) {
        this(inputSize, hiddenSize, outputSize, 0.25, 0.9);
    }

    //randomly initialize the weights between two layers
    private void randomizeWeights(double[][] matrix) {
        for(int i = 0; i < matrix.length; i++){
        	for(int j = 0; j < matrix[i].length; j++){
                double real = random.nextDouble();    //random value in [0, 1)
                matrix[i][j] = random.nextDouble() > 0.5 ? real : -real;   //randomly flip the sign, giving a weight in (-1, 1)
        	}        	
        }
    }
    
    



    public void train(double[] trainData, double[] target) {     //train on one sample
    	System.arraycopy(trainData, 0, this.input, 0, trainData.length);  //load the input data      
        System.arraycopy(target, 0, this.target, 0, target.length);  //load the expected output the same way
        forward();                  //forward pass (input -> hidden layer 1 -> hidden layer 2 -> output)
        calculateDelta();           //back-propagate the error and compute the deltas
        adjustWeight();             //update the weights
    }

    //run a forward pass on one sample to see how well the trained network does
    public double[] test(double[] inData) {
        System.arraycopy(inData, 0, input, 0, inData.length);
        forward();
        return getNetworkOutput();
    }

    //return the activations of the output layer
    private double[] getNetworkOutput() {
        return output;
    }

    //forward pass from one layer to the next: each neuron of the next layer gets the
    //weighted sum of the current layer's activations, passed through the activation function
    private void forward(double[] currentLayer, double[] nextLayer, double[][] weight) {
        for(int j = 0; j < nextLayer.length; j++){
        	double sum = 0f;
        	for(int i = 0; i < currentLayer.length; i++){
        		sum += currentLayer[i] * weight[i][j];  //activation of neuron i times the weight from i to j
        	}
        	
        	nextLayer[j] = sigmoid(sum);	//squash with the sigmoid (result lies in (0,1)) and store it in the next layer
        }
    }

    //full forward pass (input -> hidden layer 1 -> hidden layer 2 -> output)
    private void forward() {
        forward(input, hiddenLayer1, iptToHidLayer1Weights);    //input layer -> hidden layer 1
        forward(hiddenLayer1, hiddenLayer2, HidLayer1ToHidLayer2Weights);    //hidden layer 1 -> hidden layer 2
        forward(hiddenLayer2, output, hidLayer2ToOptWeights);   //hidden layer 2 -> output layer
    }

    //compute the error (delta) of the output layer
    private void outputErr() {
        double errSum = 0;  //accumulated absolute error
        for (int idx = 0; idx < optDeltaLayer2.length; idx++) {
            double o = output[idx];
            optDeltaLayer2[idx] = o * (1d - o) * (target[idx] - o);  //the output-layer gradient formula from p.104 of the book
            errSum += Math.abs(optDeltaLayer2[idx]);
        }
        optErrSum = errSum;
    }
    
    
    //compute the error (delta) of hidden layer 2
    private void hiddenLayer2Err() {
        double errSum = 0; //accumulated absolute error
        for (int j = 0; j < hidDeltaLayer2.length; j++) {
            double o = hiddenLayer2[j];  //activation of this hidden neuron
            double sum = 0;
            for (int k = 0; k < optDeltaLayer2.length; k++){  //back-propagated from the output layer
                sum += hidLayer2ToOptWeights[j][k] * optDeltaLayer2[k]; 
            }
            optDeltaLayer1[j] = o * (1d - o) * sum;  //also kept in optDeltaLayer1, to be propagated further back to hidden layer 1
            hidDeltaLayer2[j] = o * (1d - o) * sum;  //formula (5.15) on p.104 of the book
            errSum += Math.abs(hidDeltaLayer2[j]);
        }
        hidLayer2ErrSum = errSum;
    }
    

    //compute the error (delta) of hidden layer 1
    private void hiddenLayer1Err() {
        double errSum = 0; //accumulated absolute error
        for (int j = 0; j < hidDeltaLayer1.length; j++) {
            double o = hiddenLayer1[j];  //activation of this hidden neuron
            double sum = 0;
            for (int k = 0; k < optDeltaLayer1.length; k++){  //back-propagated from hidden layer 2
                sum += HidLayer1ToHidLayer2Weights[j][k] * optDeltaLayer1[k];
            }
            hidDeltaLayer1[j] = o * (1d - o) * sum;  //formula (5.15) on p.104 of the book
            errSum += Math.abs(hidDeltaLayer1[j]);
        }
        hidLayer1ErrSum = errSum;
    }

    
    

    //compute the delta of every layer (error back-propagation, see p.101 of the book); the output layer goes first because the error flows backwards
    private void calculateDelta() {
        outputErr();   //output layer delta
        hiddenLayer2Err();   //hidden layer 2 delta
        hiddenLayer1Err();   //hidden layer 1 delta
    }

    //update the weights between two layers (this is what the repeated training actually changes)
    private void adjustWeight(double[] delta, double[] layer,
                              double[][] weight, double[][] prevWeight) {

        for (int i = 0; i < delta.length; i++) {
            for (int j = 0; j < layer.length; j++) {
                double newVal = momentum * prevWeight[j][i] + eta * delta[i] * layer[j];      //update = momentum * previous update + learning rate * delta * activation of the source neuron
                weight[j][i] += newVal;  //apply the update to the weight
                prevWeight[j][i] = newVal;  //remember this update for the momentum term of the next step
            }
        }
    }

//    //sequential version of the weight update
//    private void adjustWeight() {
//        adjustWeight(optDeltaLayer2, hiddenLayer2, hidLayer2ToOptWeights, hid2ToOptPrevUptWeights);
//        adjustWeight(optDeltaLayer1, hiddenLayer1, HidLayer1ToHidLayer2Weights, hid1ToHid2PrevUptWeights);
//        adjustWeight(hidDeltaLayer1, input, iptToHidLayer1Weights, iptToHid1PrevUptWeights);
//    }
    
    //update the weights of every layer; the three weight matrices are disjoint, so each one is
    //updated on its own thread, and all three threads are joined so the update is complete before train() returns
    private void adjustWeight() {
    	Thread t1 = new Thread(new Runnable() {
			@Override
			public void run() {
				adjustWeight(optDeltaLayer2, hiddenLayer2, hidLayer2ToOptWeights, hid2ToOptPrevUptWeights);
			}
		});
    	Thread t2 = new Thread(new Runnable() {
			@Override
			public void run() {
				adjustWeight(optDeltaLayer1, hiddenLayer1, HidLayer1ToHidLayer2Weights, hid1ToHid2PrevUptWeights);
			}
		});
    	Thread t3 = new Thread(new Runnable() {
			@Override
			public void run() {
				adjustWeight(hidDeltaLayer1, input, iptToHidLayer1Weights, iptToHid1PrevUptWeights);
			}
		});
    	t1.start();
    	t2.start();
    	t3.start();
    	try {
			t1.join();   //waiting here guarantees all three matrices are updated before the next sample is trained
			t2.join();
			t3.join();
		} catch (InterruptedException e) {
			e.printStackTrace();
		}
    }

    //the sigmoid activation function is used here; a step function would also work, pick whichever you prefer
    private double sigmoid(double val) {
        return 1d / (1d + Math.exp(-val));  //see p.98 of the book
    }
    
    
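    /**
     * Dump the network's parameters (layer sizes, deltas, learning rate, momentum,
     * selected weight matrices and the error sums) to a plain-text file at the hard-coded path below.
     */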
    public void outputParameter() throws IOException{
    	File outputFile = new File("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\NeuronParameter.txt");
    	if(outputFile.exists()){
    		outputFile.delete();
    	}
    	outputFile.createNewFile();
    	FileOutputStream fileOutputStream = new FileOutputStream(outputFile);
    	writeToFileOs(fileOutputStream, "input:" + input.length + ";\n");
    	writeToFileOs(fileOutputStream, "hidden:" + hiddenLayer1.length + ";\n");
    	writeToFileOs(fileOutputStream, "output:" + output.length + ";\n");
    	writeToFileOs(fileOutputStream, "hidDelta:" + "\n"); 
    	for(int i = 0; i < hidDeltaLayer1.length; i ++){
    		writeToFileOs(fileOutputStream,  String.format("%.12f", hidDeltaLayer1[i]) + (i == hidDeltaLayer1.length - 1 ? ";" : ","));
    	}
    	writeToFileOs(fileOutputStream,  "\n");
    	writeToFileOs(fileOutputStream, "optDelta:" + "\n"); 
    	for(int i = 0; i < optDeltaLayer1.length; i ++){
    		writeToFileOs(fileOutputStream,  String.format("%.12f", optDeltaLayer1[i]) + (i == optDeltaLayer1.length - 1 ? ";" : ","));
    	}
    	writeToFileOs(fileOutputStream,  "\n");
    	writeToFileOs(fileOutputStream, "eta:" + eta + ";\n");
    	writeToFileOs(fileOutputStream, "momentum:" + momentum + ";\n");
    	writeToFileOs(fileOutputStream, "iptHidWeights:" + ";\n");
    	for(int i = 0; i < iptToHidLayer1Weights.length; i++){
    		for(int j = 0; j < iptToHidLayer1Weights[i].length; j++){
    			writeToFileOs(fileOutputStream,  String.format("%.12f", iptToHidLayer1Weights[i][j]) + (j == iptToHidLayer1Weights[i].length - 1 ? ";" : ","));
    		}    		
    		writeToFileOs(fileOutputStream,  "\n");
    	}
    	writeToFileOs(fileOutputStream,  "\n");
    	
    	writeToFileOs(fileOutputStream, "hidOptWeights:" + "\n");
    	for(int i = 0; i < hidLayer2ToOptWeights.length; i++){
    		for(int j = 0; j < hidLayer2ToOptWeights[i].length; j++){
    			writeToFileOs(fileOutputStream,  String.format("%.12f", hidLayer2ToOptWeights[i][j]) + (j == hidLayer2ToOptWeights[i].length - 1 ? ";" : ","));
    		}    		
    		writeToFileOs(fileOutputStream,  "\n");
    	}
    	writeToFileOs(fileOutputStream,  "\n");
    	
    	
    	
    	writeToFileOs(fileOutputStream, "iptHidPrevUptWeights:" + "\n");
    	for(int i = 0; i < iptToHid1PrevUptWeights.length; i++){
    		for(int j = 0; j < iptToHid1PrevUptWeights[i].length; j++){
    			writeToFileOs(fileOutputStream,  String.format("%.12f", iptToHid1PrevUptWeights[i][j]) + (j == iptToHid1PrevUptWeights[i].length - 1 ? ";" : ","));
    		}    		
    		writeToFileOs(fileOutputStream,  "\n");
    	}
    	writeToFileOs(fileOutputStream,  "\n");
    	
    	writeToFileOs(fileOutputStream, "hidOptPrevUptWeights:" + "\n");
    	for(int i = 0; i < hid2ToOptPrevUptWeights.length; i++){
    		for(int j = 0; j < hid2ToOptPrevUptWeights[i].length; j++){
    			writeToFileOs(fileOutputStream,  String.format("%.12f", hid2ToOptPrevUptWeights[i][j]) + (j == hid2ToOptPrevUptWeights[i].length - 1 ? ";" : ","));
    		}    		
    		writeToFileOs(fileOutputStream,  "\n");
    	}
    	writeToFileOs(fileOutputStream,  "\n");
    	
    	
    	writeToFileOs(fileOutputStream, "optErrSum:" + String.format("%.12f", optErrSum) + ";\n");
    	writeToFileOs(fileOutputStream, "hidErrSum:" + String.format("%.12f", hidLayer1ErrSum) + ";\n");
    	fileOutputStream.close();
    }
    
    public void outputBpObject(File file){
    	try {
    		if(!file.exists()){
    			file.createNewFile();
    		}
			ObjectOutputStream objectOutputStream = new ObjectOutputStream(new FileOutputStream(file));
			objectOutputStream.writeObject(this);
			objectOutputStream.flush();
			objectOutputStream.close();
			System.out.println("BP network saved successfully");
		} catch (Exception e) {
			System.out.println("Failed to save the BP network");
			e.printStackTrace();
		}
    }
    
    private void writeToFileOs(FileOutputStream fileOutputStream, String content){
    	try {
			fileOutputStream.write(content.getBytes());
		} catch (IOException e) {
			e.printStackTrace();
		}
    }

}

I'll leave the explanation of the underlying principles to a later update. Out of laziness I gave both hidden layers the same number of neurons; readers can change that to whatever they need.
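Until that explanation arrives, here is a short summary of the update rules the code above implements on every call to train() (sigmoid activations, no bias terms; these are the standard BP formulas the code comments point to on p.104 of the book, with a momentum term added):

$$g_j = \hat{y}_j\,(1-\hat{y}_j)\,(y_j-\hat{y}_j)$$

$$e_h = b_h\,(1-b_h)\sum_j w_{hj}\,g_j$$

$$\Delta w_{hj}^{(t)} = \eta\, g_j\, b_h + \alpha\, \Delta w_{hj}^{(t-1)}, \qquad w_{hj} \leftarrow w_{hj} + \Delta w_{hj}^{(t)}$$

Here $\hat{y}_j$ is output[j], $y_j$ is target[j], $b_h$ is the activation of the layer feeding the connection, $\eta$ is eta and $\alpha$ is momentum; the same hidden-layer formula is applied a second time to carry the error from hidden layer 2 back to hidden layer 1.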

 

        For testing I used a CSV exported from a large set of pre-drawn shapes, together with a second CSV that gives the label for each row of training pixels. You can of course substitute your own data set, for example a set of digits, and train on that.
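The CSVFileUtil helper that the test code below relies on is not included in this post (the full download at the end should contain the original). As a stand-in, a minimal sketch that supports just the four calls the driver makes (getRowNum, getColNum, getRow and getString) could look like the following; the real class may differ:

package com.test.bpNeuronNetworkVer2;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

//Minimal stand-in for the CSVFileUtil used by the test driver below (an assumption, not the original class)
public class CSVFileUtil {

    private final List<String[]> rows = new ArrayList<String[]>();

    public CSVFileUtil(String path) throws IOException {
        BufferedReader reader = new BufferedReader(new FileReader(path));
        String line;
        while ((line = reader.readLine()) != null) {
            rows.add(line.split(","));    //every cell is a plain number here, so a simple split is enough
        }
        reader.close();
    }

    public int getRowNum() {              //number of rows in the file
        return rows.size();
    }

    public int getColNum() {              //number of columns, taken from the first row
        return rows.isEmpty() ? 0 : rows.get(0).length;
    }

    public String getRow(int row) {       //one whole row, as the original comma-separated string
        return String.join(",", rows.get(row));
    }

    public String getString(int row, int col) {   //a single cell
        return rows.get(row)[col];
    }
}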

       My test driver uses two hidden layers of 28 neurons each and a target accuracy of 0.99; the training loop only exits once that accuracy is reached (or after 500 passes). trainingresult holds the expected output vector: if a training sample's label is 0, then trainingresult[0] is set to 1 and every other index stays 0, marking what the correct output should look like, so training drives the network towards that one-hot target.

       Finally, once the target accuracy has been reached, you can call the network's outputBpObject method to obtain a serialized object file, recreate the same package path in an Android project, and drop the file into the assets directory; the network trained on the PC can then be used directly on Android.
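Here is a rough sketch of the Android-side loading code, for orientation only; it is not part of the original project, and the Context, the asset file name and the pixel preprocessing are placeholders you would supply yourself:

//Sketch only (not part of the original post): load the serialized network from assets on Android.
//BPDualLayer must exist in the app under the same package name, com.test.bpNeuronNetworkVer2,
//otherwise readObject() will fail; the asset file name below is just an example.
public static BPDualLayer loadFromAssets(android.content.Context context) throws Exception {
    java.io.ObjectInputStream in = new java.io.ObjectInputStream(
            context.getAssets().open("neuroNetworkObject.bp"));
    BPDualLayer bp = (BPDualLayer) in.readObject();
    in.close();
    return bp;
}

//usage, e.g. inside an Activity:
//BPDualLayer bp = loadFromAssets(this);
//double[] scores = bp.test(pixels);   //pixels: 2500 values preprocessed the same way as the training data

And now the training/test driver itself: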

package com.test.bpNeuronNetworkVer2;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.Scanner;

public class TestRecoginzeShapeDualLayer {

	/**
	 * @param args
	 * @throws Exception 
	 */
//	private static int hidCount = 16;
//	private static int hidCount = 18;
//	private static int hidCount = 30;
//	private static int hidCount = 50;
	private static int hidCount = 28;
//	private static int hidCount = 27;
//	private static int hidCount = 120;
	private static int trainCount = 0;
	private static double targetRightRate = 0.99f;
	private static int outputCount = 4;
	public static void main(String[] args) throws Exception {
		System.out.println("Dual hidden-layer network, default settings");
		System.out.println("Hidden neurons per layer: " + hidCount);
		BPDualLayer bp = new BPDualLayer(50*50, hidCount, outputCount, 0.3, 0.9); // 50*50 = 2500 input neurons, two hidden layers of hidCount neurons each, outputCount output neurons
		float rightRate;
		new Thread(new Runnable() {
			
			@Override
			public void run() {
				Scanner scanner = new Scanner(System.in);
				while(true){
					targetRightRate = Float.valueOf(scanner.nextLine());
					System.out.println("Target accuracy changed to: " + targetRightRate);
				}
			}
		}).start();
		do {
			System.out.println("Starting training pass " + (trainCount++));
			//labels of the training data
			CSVFileUtil util = new CSVFileUtil("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\trainLabels.csv");
			int resultRow = util.getRowNum(); // number of label rows
//			int resultRow = 20; // number of label rows
			int resultColumn = util.getColNum(); // number of label columns
			//training data, 50*50 = 2500 pixels per row
			CSVFileUtil util2 = new CSVFileUtil("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\trainData.csv");
			int inputRow = util2.getRowNum(); // number of training data rows
			int inputColumn = util2.getColNum(); // number of training data columns
			
			int prevPercent = 0;
			for (int index = 0; index < resultRow; index++) {
				int nowPercent = (int)(Float.valueOf(index) /Float.valueOf(resultRow) * 100);
				if(nowPercent != prevPercent) {
					System.out.print("+");
				}
				prevPercent = (int)(Float.valueOf(index) /Float.valueOf(resultRow) * 100); 
				double[] trainingresult = new double[outputCount]; // one slot per possible class
				String getresult = util.getString(index, 0); // label of this training sample
				trainingresult[Integer.parseInt(getresult)] = 1; // mark the correct class with 1 (full confidence); all other slots stay 0
				// convert every pixel of this training sample to 0 or 1
				double[] binary = new double[50 * 50]; // binarized input (2500 values)
				String currentTrainData = util2.getRow(index);
				String[] currentTrainDataNum = currentTrainData.split("\\,");
				for(int i = 0; i < currentTrainDataNum.length; i++){
//					float val = Float.valueOf(currentTrainDataNum[i]) / 255f;
//					binary[i] = val;

					float val = Float.valueOf(currentTrainDataNum[i]);
					if(val >= 255f / 2){
						binary[i] = 1;
					} else {
						binary[i] = 0;
					}
					//System.out.println("::" + binary[i]);
				}
				bp.train(binary, trainingresult); // train on this sample and its one-hot target
			}
			System.out.println("Finished this training pass, running the test set");
			rightRate = recognize(bp);
			System.out.println(String.format("Accuracy: %.20f", rightRate));
			if(rightRate < targetRightRate){
				System.out.println("Accuracy too low, training another pass");
				if(trainCount > 500){
					break;
				}
			} else {
				System.out.println("Training finished");
			}
		} while(rightRate < targetRightRate);
		//bp.outputParameter();
		bp.outputBpObject(new File("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\neuroNetworkObject_" + System.currentTimeMillis() + "_neuronCount_" + hidCount + ".bp"));
//		realRecognize(bp);
	}
	
	
	private static float recognize(BPDualLayer bp){
		try {
//			BufferedReader fileReaderTrainData = new BufferedReader(new FileReader(new File("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\trainData.csv")));
//			BufferedReader fileReaderTrainDataLables = new BufferedReader(new FileReader(new File("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\trainLabels.csv")));
			BufferedReader fileReaderTrainData = new BufferedReader(new FileReader(new File("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\testData.csv")));
			BufferedReader fileReaderTrainDataLables = new BufferedReader(new FileReader(new File("E:\\work\\ExampleProject\\人工神经网络\\手写图案的识别数据\\testLabels.csv")));
			
			int dataCount = 0;
			int rightCount = 0;
			
			String data = fileReaderTrainData.readLine();
			String label = fileReaderTrainDataLables.readLine();
			while (data != null) {
				String[] dataSplited = data.split("\\,");

				//System.out.println("data length: " + dataSplited.length);

				double dataDouble[] = new double[dataSplited.length];
				for (int i = 0; i < dataDouble.length; i++) {
					dataDouble[i] = Double.parseDouble(dataSplited[i]);
				}

				double[] result = bp.test(dataDouble); // network output for this sample
				int maxPossibleChoice = 0;
				double maxPossible = 0;
				for (int i = 0; i < result.length; i++) {
					//System.out.println(i + ", class score: " + String.format("%.6f", result[i]));
					if(result[i] > maxPossible) {
						maxPossible = result[i];
						maxPossibleChoice = i;
					}
				}
				if(Integer.parseInt(label) == maxPossibleChoice){
					rightCount ++;
				}
				dataCount ++;
				data = fileReaderTrainData.readLine();
				label = fileReaderTrainDataLables.readLine();
			}
			fileReaderTrainData.close();
			fileReaderTrainDataLables.close();
			//return the fraction of correctly recognized samples
			return Float.valueOf(rightCount) / Float.valueOf(dataCount);
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		return 0f;
	}
	
	
    public enum Shapes{
        TRANIGLE,
        //LEFT_ISO_TRANIGLE,
        //RIGHT_ISO_TRANIGLE,
        //TOP_LEFT_ISO_TRANIGLE,
        //TOP_RIGHT_ISO_TRANIGLE,
        RECTANGLE,
        CIRCLE,
        DIAMON
        //PENTAGON
    }
    
	private static void realRecognize(BPDualLayer bp){
		Scanner scanner = new Scanner(System.in);
		while(true){
			String data = scanner.nextLine();
			String[] dataSplited = data.split("\\,");

			//System.out.println("data length: " + dataSplited.length);

			double dataDouble[] = new double[dataSplited.length];
			for (int i = 0; i < dataDouble.length; i++) {
				dataDouble[i] = Double.parseDouble(dataSplited[i]);
			}

			double[] result = bp.test(dataDouble); // network output for this sample
			int maxPossibleChoice = 0;
			double maxPossible = 0;
			for (int i = 0; i < result.length; i++) {
				//System.out.println(i + ", class score: " + String.format("%.6f", result[i]));
				if(result[i] > maxPossible) {
					maxPossible = result[i];
					maxPossibleChoice = i;
				}
			}
			System.out.println("This is a: " + Shapes.values()[maxPossibleChoice]);
		}
	}
	
	
//	private static double[] transPixelToBinary(){
//		
//	}
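	// Hedged addition (not in the original code): the training loop binarizes each pixel at 255/2,
	// while recognize() and realRecognize() feed the CSV values to bp.test() unchanged, so the test
	// data is assumed to already contain 0/1 values. If your test data is raw 0-255 grayscale, run it
	// through the same threshold first, for example with a helper like this:
	private static double[] transPixelToBinary(String[] pixelValues) {
		double[] binary = new double[pixelValues.length];
		for (int i = 0; i < pixelValues.length; i++) {
			binary[i] = Float.parseFloat(pixelValues[i]) >= 255f / 2 ? 1 : 0;
		}
		return binary;
	}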

}

Training process:

(Screenshot 1: the training run)

 

The result in use:

(Screenshot 2: recognition in use)

 

Download link for the training set and the full source code:

Link: https://pan.baidu.com/s/1up5_IAYCU06_oX2ePojZNA   Extraction code: xju8
