代码来自闵老师“日撸 Java 三百行(61-70天)”
日撸 Java 三百行(61-70天,决策树与集成学习)_闵帆的博客-CSDN博客
抽象分类器定义了两个抽象方法,是为了支持不同的弱分类器。这里并没有具体实现分类,是一个超类。实现了准确率计算、分类正确的实例保存等。
抽象分类器代码:
package machinelearning.adaboosting;
import java.util.Random;
import weka.core.Instance;;
/**
 * The super class of any simple (weak) classifier. It does not implement a
 * concrete classification algorithm; subclasses provide train() and
 * classify(). This class offers shared bookkeeping: correctness array,
 * training accuracy, and weighted error computation.
 *
 * @author WX873
 */
public abstract class SimpleClassifier {
	/**
	 * The index of the currently selected attribute.
	 */
	int selectedAttribute;

	/**
	 * The weighted training data.
	 */
	WeightedInstances weightedInstances;

	/**
	 * The accuracy on the training set.
	 */
	double trainingAccuracy;

	/**
	 * The number of classes. For binary classification it is 2.
	 */
	int numClasses;

	/**
	 * The number of instances.
	 */
	int numInstances;

	/**
	 * The number of conditional attributes.
	 */
	int numConditions;

	/**
	 * For random number generation.
	 */
	Random random = new Random();

	/**
	 ******************
	 * The first constructor.
	 *
	 * @param paraWeightedInstances The given weighted instances.
	 ******************
	 */
	public SimpleClassifier(WeightedInstances paraWeightedInstances) {
		weightedInstances = paraWeightedInstances;

		// The last attribute is the class, hence the -1.
		numConditions = weightedInstances.numAttributes() - 1;
		numInstances = weightedInstances.numInstances();
		numClasses = weightedInstances.classAttribute().numValues();
	} // Of the first constructor

	/**
	 ******************
	 * Train the classifier.
	 ******************
	 */
	public abstract void train();

	/**
	 ******************
	 * Classify an instance.
	 *
	 * @param paraInstance The given instance.
	 * @return Predicted label.
	 ******************
	 */
	public abstract int classify(Instance paraInstance);

	/**
	 ******************
	 * Which instances of the training set are correctly classified.
	 *
	 * @return The correctness array.
	 ******************
	 */
	public boolean[] computeCorrectnessArray() {
		boolean[] tempCorrectnessArray = new boolean[weightedInstances.numInstances()];
		for (int i = 0; i < tempCorrectnessArray.length; i++) {
			Instance tempInstance = weightedInstances.instance(i);
			// True exactly when the prediction matches the actual label.
			tempCorrectnessArray[i] = (classify(tempInstance) == (int) tempInstance.classValue());
		} // Of for i

		return tempCorrectnessArray;
	} // Of computeCorrectnessArray

	/**
	 ******************
	 * Compute the accuracy on the training set.
	 *
	 * @return The training accuracy.
	 ******************
	 */
	public double computeTrainingAccuracy() {
		int tempNumCorrect = 0;
		boolean[] tempCorrectnessArray = computeCorrectnessArray();
		for (boolean tempCorrect : tempCorrectnessArray) {
			if (tempCorrect) {
				tempNumCorrect++;
			} // Of if
		} // Of for

		return (double) tempNumCorrect / tempCorrectnessArray.length;
	} // Of computeTrainingAccuracy

	/**
	 ******************
	 * Compute the weighted error on the training set: the sum of weights of
	 * misclassified instances, clamped from below to avoid division by zero
	 * (and log of zero) in the boosting algorithm.
	 *
	 * @return The weighted error, at least 1e-6.
	 ******************
	 */
	public double computeWeightedError() {
		double tempError = 0;
		boolean[] tempCorrectnessArray = computeCorrectnessArray();
		for (int i = 0; i < tempCorrectnessArray.length; i++) {
			if (!tempCorrectnessArray[i]) {
				tempError += weightedInstances.getWeight(i);
			} // Of if
		} // Of for i

		// Clamp, equivalent to: if (tempError < 1e-6) tempError = 1e-6.
		return Math.max(tempError, 1e-6);
	} // Of computeWeightedError
} // Of class SimpleClassifier
树桩分类器代码:
package machinelearning.adaboosting;
import java.io.FileReader;
import java.util.Arrays;
import weka.core.Instance;
/**
* The stump classifier.
*
* @param paraweightedInstances
*/
public class StumpClassifier extends SimpleClassifier{
/**
* The best cut for the current attribute on weightedInstances.
*/
double bestCut;
/**
* The class label for attribute value less than bestCut.
*/
int leftLeafLabel;
/**
* The class label for attribute value no less than bestCut.
*/
int rightLeafLabel;
/**
* *******************************************************
* The only constructor
*
* @param paraweightedInstances The given instances.
* *******************************************************
*/
public StumpClassifier (WeightedInstances paraweightedInstances) {
super(paraweightedInstances);
// TODO Auto-generated constructor stub
}//of the only constructor
/**
* ********************************************************
* Train the classifier.
* ********************************************************
*/
public void train() {
// Step 1. Randomly choose an attribute.
selectedAttribute = random.nextInt(numConditions);
// Step 2. Find all values of the given attribute and sort.
double[] tempValuesArrays = new double[numInstances];
for (int i = 0; i < tempValuesArrays.length; i++) {
tempValuesArrays[i] = weightedInstances.instance(i).value(selectedAttribute);
}//of for i
Arrays.sort(tempValuesArrays);
// Step 3. Initialize, classify all instances as the same with the original cut.
int tempNumLabels = numClasses;
double[] tempLabelCountArray = new double[tempNumLabels];
int tempCurrentLabel;
// Step 3.1 Scan all labels to obtain their counts.
for (int i = 0; i < numInstances; i++) {
// The label of the ith instance
tempCurrentLabel = (int)weightedInstances.instance(i).classValue();
tempLabelCountArray[tempCurrentLabel] += weightedInstances.getWeight(i);
}//of for i
// Step 3.2 Find the label with the maximal scores.
double tempMaxCorrect = 0;
int tempBestLabel = -1;
for (int i = 0; i < tempLabelCountArray.length; i++) {
if (tempMaxCorrect < tempLabelCountArray[i]) {
tempMaxCorrect = tempLabelCountArray[i];
tempBestLabel = i;
}//of if
}//of for i
// Step 3.3 The cut is a little bit smaller than the minimal value.
bestCut = tempValuesArrays[0] - 0.1;
leftLeafLabel = tempBestLabel;
rightLeafLabel = tempBestLabel;
// Step 4. Check candidate cuts one by one.
// To handle multi-class data, left and right.
double tempCut;
double[][] tempLabelCountMatrix = new double[2][tempNumLabels];
for (int i = 0; i < tempValuesArrays.length - 1; i++) {
// Step 4.1 Some attribute values are identical, ignore them.
if (tempValuesArrays[i] == tempValuesArrays[i + 1]) {
continue;
}//of if
tempCut = (tempValuesArrays[i] + tempValuesArrays[i + 1]) / 2;
// Step 4.2 Scan all labels to obtain their counts wrt. the cut.
// Initialize again since it is used many times.
for (int j = 0; j < 2; j++) {
for (int k = 0; k < tempNumLabels; k++) {
tempLabelCountMatrix[j][k] = 0;
}//of for k
}//of for i
for (int j = 0; j < numInstances; j++) {
// The label of the jth instance
tempCurrentLabel = (int)weightedInstances.instance(j).classValue();
if (weightedInstances.instance(j).value(selectedAttribute) < tempCut) {
tempLabelCountMatrix[0][tempCurrentLabel] += weightedInstances.getWeight(j);
} else {
tempLabelCountMatrix[1][tempCurrentLabel] += weightedInstances.getWeight(j);
}//of if
}//of for j
// Step 4.3 Left leaf.
double tempLeftMaxCorrect = 0;
int tempLeftBestLAbel = 0;
for (int j = 0; j < tempLabelCountMatrix[0].length; j++) {
if (tempLeftMaxCorrect < tempLabelCountMatrix[0][j]) {
tempLeftMaxCorrect = tempLabelCountMatrix[0][j];
tempLeftBestLAbel = j;
}//of if
}//of for j
// Step 4.4 Right leaf.
double tempRightMaxCorrect = 0;
int tempRightBestLAbel = 0;
for (int j = 0; j < tempLabelCountMatrix[1].length; j++) {
if (tempRightMaxCorrect < tempLabelCountMatrix[1][j]) {
tempRightMaxCorrect = tempLabelCountMatrix[1][j];
tempRightBestLAbel = j;
}//of if
}//of for j
// Step 4.5 Compare with the current best.
//原本tempMaxCorrect存的是cut之前的权值累加最大的值。
//分成左右之后,两个部分各自标签累加最大的值相加,肯定大于没分之前。接下来的代码就是为了找出最好的cut。如果是最优的cut,tempLeftMaxCorrect + tempRightMaxCorrect之和应该是最大的
if (tempMaxCorrect < tempLeftMaxCorrect + tempRightMaxCorrect) {
tempMaxCorrect = tempLeftMaxCorrect + tempRightMaxCorrect;
bestCut = tempCut;
leftLeafLabel = tempLeftBestLAbel;
rightLeafLabel = tempRightBestLAbel;
}//of if
}//of for i
}//of train
/**
******************
* Classify an instance.
*
* @param paraInstance
* The given instance.
* @return Predicted label.
******************
*/
public int classify(Instance paraInstance) {
// TODO Auto-generated method stub
int resultLabel = -1;
if (paraInstance.value(selectedAttribute) < bestCut) {
resultLabel = leftLeafLabel;
} else {
resultLabel = rightLeafLabel;
}//of if
return resultLabel;
}//of classify
/**
******************
* For display.
******************
*/
public String toString() {
String resultString = "I am a stump classifier.\r\n" + "I choose attribute #" + selectedAttribute
+ " with cut value " + bestCut + ".\r\n" + "The left and right leaf labels are " + leftLeafLabel
+ " and " + rightLeafLabel + ", respectively.\r\n" + "My weighted error is: " + computeWeightedError()
+ ".\r\n" + "My weighted accuracy is : " + computeTrainingAccuracy() + ".";
return resultString;
}//of toString
/**
* ********************************************
* For unit test.
* @param args
* ********************************************
*/
public static void main(String args[]) {
WeightedInstances tempWeightedInstances = null;
String tempFilename = "E:/Datasets/UCIdatasets/其他数据集/iris.arff";
try {
FileReader fileReader = new FileReader(tempFilename);
tempWeightedInstances = new WeightedInstances(fileReader);
fileReader.close();
} catch (Exception e) {
// TODO: handle exception
System.out.println("Cannot read the file: " + tempFilename + "\r\n" + e);
System.exit(0);
}//of try
StumpClassifier tempClassifier = new StumpClassifier(tempWeightedInstances);
tempClassifier.train();
System.out.println(tempClassifier);
System.out.println(Arrays.toString(tempClassifier.computeCorrectnessArray()));
}//of main
}// Of class StumpClassifier
这里实现了一个弱分类器——树桩分类,只是将数据集分为两类。分类的时候的操作有点儿类似决策树,以某一属性进行分类。决策树是根据信息熵计算按照哪个属性分类,树桩分类是随机选择一个属性进行分类。
随机选择一个属性之后,将属性值进行排序。
然后从最小值开始逐一尝试切分线,直至找到分类效果最优的线。小于 cut 的实例归入左叶子;不小于 cut(即大于等于 cut)的实例归入右叶子。此时对应的 cut 就是最好的分类位置。
分别算出左孩子类和右孩子类的最优标签。
最优分类效果的标准是左边最优类标签权重和加上右边最优类标签权重和达到最大值。过程是通过遍历每一个实例进行逐一查找。