本文中分三个方法介绍判别分析,Bayes判别,距离判别,Fisher判别。前两种判别方法都要考虑两个、或多个总体协方差(这里是算方差,方差是协方差的一种)相等或不等的情况,由var.equal=的逻辑参数表示,默认是FALSE,表示认为两总体协方差不等。用样本的协方差可以估计总体的协方差。在Bayes方法中我们把相等和不等的两个结果都列了出来,距离判别里我们默认两总体协方差不等。事实上,一般使用时,我们都以两总体的协方差不等作为标准来进行后续计算。
协方差计算公式:
$S_{xy} = \sum_{i=1}^{N} (x_i - \bar{x})(y_i - \bar{y}) / (N-1)$ ;
这里要计算的总体协方差就是方差,为
$S = \sum_{i=1}^{N} (x_i - \bar{x})(x_i - \bar{x})^{T} / (N-1)$
Bayes判别是假定对研究对象已有一定的认识,这种认识常用先验概率来描述,当取得样本后,就可以用样本来修正已有的先验概率分布,得出后验概率分布,现通过后验概率分布进行各种统计推断.
# Two-class Bayes discriminant analysis.
#
# Args:
#   TrnX1, TrnX2: training samples of class 1 / class 2 (data frame or
#     matrix, one sample per row).
#   rate: ratio L(1|2)/L(2|1) * p2/p1 combining the misclassification
#     losses and the prior probabilities (default 1: equal losses and
#     equal priors).
#   TstX: test samples (matrix, data frame, or a single vector meaning
#     one sample). When NULL, the stacked training samples are
#     classified (resubstitution).
#   var.equal: if TRUE, assume both classes share one covariance matrix
#     (pooled estimate); otherwise estimate one covariance per class.
#
# Returns: a 1 x n matrix named "blong"; entry 1 assigns the sample to
#   class 1 (TrnX1), entry 2 to class 2 (TrnX2).
discriminiant.bayes <- function(TrnX1, TrnX2, rate = 1, TstX = NULL,
                                var.equal = FALSE) {
  # Default: classify the training data itself.
  if (is.null(TstX)) TstX <- rbind(TrnX1, TrnX2)
  # Normalize inputs to matrices; a bare vector is one sample (one row).
  if (is.vector(TstX)) TstX <- t(as.matrix(TstX))
  else if (!is.matrix(TstX)) TstX <- as.matrix(TstX)
  if (!is.matrix(TrnX1)) TrnX1 <- as.matrix(TrnX1)
  if (!is.matrix(TrnX2)) TrnX2 <- as.matrix(TrnX2)

  nx <- nrow(TstX)
  mu1 <- colMeans(TrnX1)
  mu2 <- colMeans(TrnX2)

  # isTRUE() replaces `var.equal == TRUE || var.equal == T`: T is a
  # reassignable binding, and the two comparisons were redundant.
  if (isTRUE(var.equal)) {
    # Shared covariance: the log-determinant terms cancel out.
    S <- var(rbind(TrnX1, TrnX2))
    beta <- 2 * log(rate)
    w <- mahalanobis(TstX, mu2, S) - mahalanobis(TstX, mu1, S)
  } else {
    # Per-class covariances: the threshold picks up log(|S1|/|S2|).
    S1 <- var(TrnX1)
    S2 <- var(TrnX2)
    beta <- 2 * log(rate) + log(det(S1) / det(S2))
    w <- mahalanobis(TstX, mu2, S2) - mahalanobis(TstX, mu1, S1)
  }

  # w > beta: the sample is closer (in the Bayes sense) to class 1.
  blong <- matrix(ifelse(w > beta, 1, 2), nrow = 1,
                  dimnames = list("blong", 1:nx))
  blong
}
# Priors p1, p2 are estimated as 15/31 and 16/31 respectively.
# Load the data: class-1 training samples (15 rows, 2 variables) and
# class-2 training samples (16 rows, 2 variables); each matrix column
# is one variable.
TrnX1<-matrix(
c(9.86, 13.33, 14.66, 9.33, 12.80, 10.66, 10.66, 13.33, 13.33, 13.33, 12.00, 14.66, 13.33, 12.80, 13.33,
5.18, 3.73, 3.89, 7.10, 5.49, 4.09, 4.45, 3.63, 5.96, 5.70, 6.19, 4.01, 4.01, 3.63, 5.96),
ncol=2)
TrnX2<-matrix(
c(10.66, 12.53, 13.33, 9.33, 10.66, 10.66, 9.33, 10.66, 10.66, 10.66, 10.40, 9.33, 10.66, 10.66, 11.20, 9.33,
2.07, 4.45, 3.06, 3.94, 4.45, 4.92, 3.68, 2.77, 3.21, 5.02, 3.94, 4.92, 2.69, 2.43, 3.42, 3.63),
ncol=2)
# Test samples TstX: 10 new samples to classify.
TstX<-matrix(
c(9.06,13.00,12.66,9.00,12.12,11.66,12.11,12.63,8.33,11.12,
5.68,3.43,2.82,6.86,5.19,2.17,4.12,3.26,2.91,4.22 ),
ncol=2)
# Load the two-population Bayes discriminant function (the function is
# saved as discriminiant.bayes.R in the working directory).
source("discriminiant.bayes.R")
#### Case: equal population covariance matrices
discriminiant.bayes(TrnX1, TrnX2,rate=16/15,var.equal=TRUE) # rate = L(1|2)/L(2|1) * p2/p1
discriminiant.bayes(TrnX1, TrnX2,TstX,rate=16/15,var.equal=TRUE)
#### Case: unequal population covariance matrices
discriminiant.bayes(TrnX1, TrnX2,rate=16/15)
discriminiant.bayes(TrnX1, TrnX2,TstX,rate=16/15)
在判别函数的鉴定下,有5个被错判,3个样本从冠心病组被错判为正常组,两个样本从正常组错判到冠心病组,分别为1,6,7,17,18
待测样本分类结果为:
1 2 3 4 5 6 7 8 9 10 (样本协方差阵相同)
blong 2 1 2 1 1 2 1 2 2 2
1 2 3 4 5 6 7 8 9 10 (样本协方差阵不同)
blong 1 1 2 1 1 2 1 2 2 2
# Alternative: import the data from a CSV file (save the two classes
# above as coronary_disease.csv first).
coronary_disease <- read.csv("coronary_disease.csv")
# Turn the grouping variable into a qualitative variable (factor).
group <- factor(coronary_disease$group)
# Optionally draw 20 random training samples:
# train <- sample(1:31, 20)
# and inspect the class proportions inside the training sample:
# table(group[train])
# Linear discriminant analysis: group is the class label, X1/X2 are the
# discriminating variables; prior probability 50% for each class.
library(MASS)
Z <- lda(group ~ ., data = coronary_disease, prior = c(1, 1) / 2)  # subset=train
Z
判别函数是Z = -0.6379195 * X1 + -0.8001452 * X2
# Re-read the raw data and build a reproducible train/test split.
coronary_disease <- read.csv("coronary_disease.csv")
# Number of observations.
N <- length(coronary_disease$group)
# Fix the random 80/20 split. BUG FIX: the seed must be set BEFORE
# sampling (the original set it afterwards, so the split was not
# reproducible despite the "fix this split" comment).
set.seed(7)
# ind == 1 marks rows drawn with probability 0.8, ind == 2 with 0.2
# (the original comment said 0.7/0.3, contradicting the code).
ind <- sample(2, N, replace = TRUE, prob = c(0.8, 0.2))
# Training set (~80% of the data).
coronary_train <- coronary_disease[ind == 1, ]
# Test set (~20% of the data).
coronary_test <- coronary_disease[ind == 2, ]
# The training set holds i samples of group 1 and j samples of group 2,
# so the test set holds the remaining N - i - j samples.
i <- table(coronary_train$group)[[1]]
j <- table(coronary_train$group)[[2]]
i; j
# Split the training set into the two class matrices and take the test
# predictors (columns 2:3 are X1, X2).
# NOTE(review): this indexing assumes coronary_train is ordered with all
# group-1 rows before group-2 rows — confirm against the CSV layout.
TrnX1 <- coronary_train[1:i, 2:3]
TrnX2 <- coronary_train[(i + 1):(i + j), 2:3]
TstX <- coronary_test[, 2:3]
source("discriminiant.bayes.R")
#### Equal population covariance matrices
## Priors p1, p2 are estimated by i/(i+j) and j/(i+j).
discriminant_train1 <- discriminiant.bayes(TrnX1, TrnX2, rate = j/i, var.equal = TRUE); discriminant_train1  # rate = L(1|2)/L(2|1) * p2/p1
discriminant_test1 <- discriminiant.bayes(TrnX1, TrnX2, TstX, rate = j/i, var.equal = TRUE); discriminant_test1
#### Unequal population covariance matrices
discriminant_train2 <- discriminiant.bayes(TrnX1, TrnX2, rate = j/i); discriminant_train2
discriminant_test2 <- discriminiant.bayes(TrnX1, TrnX2, TstX, rate = j/i); discriminant_test2
# Tag each row with the set it belongs to (sizes taken from the data
# frames instead of hard-coding the total of 31).
n_train <- nrow(coronary_train)
n_test <- nrow(coronary_test)
coronary_train$Is <- rep("train", n_train)
coronary_test$Is <- rep("test", n_test)
# Record the predicted group under each covariance assumption.
coronary_train$Belong_VarSame <- discriminant_train1[1:n_train]
coronary_test$Belong_VarSame <- discriminant_test1[1:n_test]
coronary_train$Belong_VarDiff <- discriminant_train2[1:n_train]
coronary_test$Belong_VarDiff <- discriminant_test2[1:n_test]
# Combine into one result table and export it.
coronary_result <- rbind(coronary_train, coronary_test)
write.csv(coronary_result, "coronary_result.csv")
# Accuracy on the training set, equal-covariance case. BUG FIX: the
# original swapped the names — the TRUE labels are the group column and
# the PREDICTIONS are Belong_VarSame (and "trian" was a typo).
true_value_VarSame_train <- coronary_result[coronary_result$Is == "train", ]$group
predict_value_VarSame_train <- coronary_result[coronary_result$Is == "train", ]$Belong_VarSame
# Labels are 1/2, so each misclassification contributes |diff| = 1.
error_VarSame_train <- predict_value_VarSame_train - true_value_VarSame_train
# Accuracy = share of correctly classified training samples.
accuracy_VarSame_train <- (n_train - sum(abs(error_VarSame_train))) / n_train
accuracy_VarSame_train
# Two-class distance (Mahalanobis) discriminant analysis.
#
# Args:
#   TrnX1, TrnX2: training samples of class 1 / class 2 (data frame or
#     matrix, one sample per row).
#   TstX: test samples (matrix, data frame, or a single vector meaning
#     one sample). When NULL, the stacked training samples are
#     classified (resubstitution).
#   var.equal: if TRUE, use one pooled covariance estimate; otherwise
#     one covariance per class.
#
# Returns: a 1 x n matrix named "blong"; each sample is assigned to the
#   class (1 or 2) whose mean is nearer in Mahalanobis distance.
discriminiant.distance <- function(TrnX1, TrnX2, TstX = NULL,
                                   var.equal = FALSE) {
  # Default: classify the training data itself.
  if (is.null(TstX)) TstX <- rbind(TrnX1, TrnX2)
  # Normalize inputs to matrices; a bare vector is one sample (one row).
  if (is.vector(TstX)) TstX <- t(as.matrix(TstX))
  else if (!is.matrix(TstX)) TstX <- as.matrix(TstX)
  if (!is.matrix(TrnX1)) TrnX1 <- as.matrix(TrnX1)
  if (!is.matrix(TrnX2)) TrnX2 <- as.matrix(TrnX2)

  nx <- nrow(TstX)
  mu1 <- colMeans(TrnX1)
  mu2 <- colMeans(TrnX2)

  # isTRUE() replaces the redundant `== TRUE || == T` test (T is a
  # reassignable binding); consistent with discriminiant.bayes.
  if (isTRUE(var.equal)) {
    S <- var(rbind(TrnX1, TrnX2))
    w <- mahalanobis(TstX, mu2, S) - mahalanobis(TstX, mu1, S)
  } else {
    S1 <- var(TrnX1)
    S2 <- var(TrnX2)
    w <- mahalanobis(TstX, mu2, S2) - mahalanobis(TstX, mu1, S1)
  }

  # w > 0 means the sample is closer to mu1 than to mu2.
  blong <- matrix(ifelse(w > 0, 1, 2), nrow = 1,
                  dimnames = list("blong", 1:nx))
  blong
}
# Multi-class distance (Mahalanobis) discriminant analysis.
#
# Args:
#   TrnX: training samples, one per row (matrix or data frame).
#   TrnG: factor of class labels for the rows of TrnX. For backward
#     compatibility, TrnG may instead be a second sample matrix, in
#     which case TrnX/TrnG are treated as class-1/class-2 samples.
#   TstX: test samples; defaults to TrnX (resubstitution).
#   var.equal: if TRUE, use one covariance estimate for all classes;
#     otherwise one per class.
#   tol: tolerance forwarded through mahalanobis() to solve(). The
#     default (2e-21) is tiny so near-singular covariance matrices from
#     small data sets do not abort with "system is computationally
#     singular".
#
# Returns: a 1 x n matrix named "blong"; entry k assigns the sample to
#   the k-th level of TrnG.
distinguish.distance <- function(TrnX, TrnG, TstX = NULL,
                                 var.equal = FALSE, tol = 2e-21) {
  # Legacy two-matrix call: stack the samples and fabricate the labels.
  if (!is.factor(TrnG)) {
    mx <- nrow(TrnX); mg <- nrow(TrnG)
    TrnX <- rbind(TrnX, TrnG)
    TrnG <- factor(rep(1:2, c(mx, mg)))
  }
  if (is.null(TstX)) TstX <- TrnX
  # Normalize inputs to matrices; a bare vector is one sample (one row).
  if (is.vector(TstX)) TstX <- t(as.matrix(TstX))
  else if (!is.matrix(TstX)) TstX <- as.matrix(TstX)
  if (!is.matrix(TrnX)) TrnX <- as.matrix(TrnX)

  nx <- nrow(TstX)
  blong <- matrix(rep(0, nx), nrow = 1, dimnames = list("blong", 1:nx))
  lev <- levels(TrnG)
  g <- length(lev)

  # Class means. BUG FIX: compare against the factor LEVEL rather than
  # the integer index (`TrnG == i` selects zero rows whenever the levels
  # are not "1".."g", e.g. letter labels); drop = FALSE keeps
  # single-column data as a matrix so colMeans/var still work.
  mu <- matrix(0, nrow = g, ncol = ncol(TrnX))
  for (i in 1:g)
    mu[i, ] <- colMeans(TrnX[TrnG == lev[i], , drop = FALSE])

  # Squared Mahalanobis distance of every test sample to every class.
  D <- matrix(0, nrow = g, ncol = nx)
  if (isTRUE(var.equal)) {
    for (i in 1:g)
      D[i, ] <- mahalanobis(TstX, mu[i, ], var(TrnX), tol = tol)
  } else {
    for (i in 1:g)
      D[i, ] <- mahalanobis(TstX, mu[i, ],
                            var(TrnX[TrnG == lev[i], , drop = FALSE]),
                            tol = tol)
  }

  # Assign each sample to its nearest class mean (first class wins ties,
  # matching the original strict-inequality loop).
  blong[1, ] <- apply(D, 2, which.min)
  blong
}
# Example using the same data as in Zhang Jiayu's SAS document:
# 22 samples with five indicator variables, split into three groups.
X <- data.frame(
  x1 = c(8.11,9.36,9.85,2.55,6.01,9.64,4.11,8.9,7.71,7.51,8.06,6.8,8.68,5.67,8.1,3.71,5.37,5.22,4.71,4.71,3.36,8.27),
  x2 = c(251.01,185.39,249.58,137.13,231.34,231.38,260.25,259.51,273.84,303.59,231.03,308.9,258.69,255.54,476.69,316.12,274.57,330.34,331.47,352.5,347.31,189.59),
  x3 = c(13.23,9.02,15.61,9.21,14.27,13.03,14.72,14.16,16.01,19.14,14.41,15.11,14.02,15.13,7.38,17.12,16.75,18.19,21.26,20.79,17.9,12.74),
  x4 = c(5.46,5.66,6.06,6.11,5.21,4.86,5.36,4.91,5.15,5.7,5.72,5.52,4.79,4.97,5.32,6.04,4.98,4.96,4.3,5.07,4.65,5.46),
  x5 = c(7.31,5.99,6.11,4.35,8.79,8.53,10.03,9.79,8.79,8.53,6.15,8.49,7.16,9.43,11.32,8.17,9.67,9.61,13.72,11,11.19,6.94)
)
# Group labels: 11 samples in group 1, 6 in group 2, 5 in group 3.
G <- factor(rep(c("1", "2", "3"), times = c(11, 6, 5)))
source("distinguish.distance.R")
# Per-class covariance matrices (the default).
distinguish.distance(X, G)
# Example from Xue Yi's textbook: the iris data — three species with
# 50 samples each, using the four numeric measurements.
X1 <- iris[, 1:4]
G1 <- factor(rep(1:3, each = 50))
source("distinguish.distance.R")
# Per-class covariance matrices (the default).
distinguish.distance(X1, G1)
这里用薛毅的数据时没有问题,换成自己的数据会一直报错,后来找到了原因。
distinguish.distance.R程序中有两行代码是mahalanobis(TstX, mu[i,], var(TrnX[TrnG==i,])),因为我们的数据太小,出现如下错误:
(Error in solve.default(cov, …) :
system is computationally singular: reciprocal condition number = 2.60317e-20)
mahalanobis()函数会将这个很小的值认为是0,无法继续后面的运算,所以报错
解决办法是,加一个参数,tol=2e-21,这个值设置成比error里的值小就行。
在程序中,输入变量TrnX1,TrnX2表示X1类,X2类训练样本,其输入格式是数据框,或矩阵(样本按行输入).TstX是待测样本,其输入格式是数据框,或矩阵(样本按行输入),或向量(一个待测样本).如果不输入TstX(缺省值),则待测样本为两个训练样本之和,即计算训练样本的回代情况. 函数的输出是由”1”,”2”构成的一维矩阵,”1”代表待测样本属于X1类,”2”代表待测样本属于X2类
# Two-class Fisher linear discriminant analysis.
#
# Args:
#   TrnX1, TrnX2: training samples of class 1 / class 2 (data frame or
#     matrix, one sample per row).
#   TstX: test samples (matrix, data frame, or a single vector meaning
#     one sample). When NULL, the stacked training samples are
#     classified (resubstitution).
#
# Returns: a 1 x n matrix named "blong"; 1 = class 1 (TrnX1),
#   2 = class 2 (TrnX2).
discriminiant.fisher <- function(TrnX1, TrnX2, TstX = NULL) {
  # Default: classify the training data itself.
  if (is.null(TstX)) TstX <- rbind(TrnX1, TrnX2)
  # Normalize inputs to matrices; a bare vector is one sample (one row).
  if (is.vector(TstX)) TstX <- t(as.matrix(TstX))
  else if (!is.matrix(TstX)) TstX <- as.matrix(TstX)
  if (!is.matrix(TrnX1)) TrnX1 <- as.matrix(TrnX1)
  if (!is.matrix(TrnX2)) TrnX2 <- as.matrix(TrnX2)

  nx <- nrow(TstX)
  n1 <- nrow(TrnX1)
  n2 <- nrow(TrnX2)
  mu1 <- colMeans(TrnX1)
  mu2 <- colMeans(TrnX2)

  # Pooled within-class scatter matrix (left unnormalized: the scale
  # cancels out of the sign test below).
  S <- (n1 - 1) * var(TrnX1) + (n2 - 1) * var(TrnX2)
  # Weighted overall mean = cutoff point on the projection axis.
  mu <- n1 / (n1 + n2) * mu1 + n2 / (n1 + n2) * mu2
  # Project the centered test samples onto the Fisher direction
  # S^{-1}(mu2 - mu1); w <= 0 falls on the class-1 side.
  w <- (TstX - rep(1, nx) %o% mu) %*% solve(S, mu2 - mu1)

  blong <- matrix(ifelse(w <= 0, 1, 2), nrow = 1,
                  dimnames = list("blong", 1:nx))
  blong
}
# Fisher discriminant example: two classes of 30 samples each, with six
# indicator variables x1..x6 per sample.
classX1 <- data.frame(
x1 = c(245,236,238,233,240,235,204,200,297,177,200,195,166,144,233,143,228,264,178,240,180,161,236,168,174,215,268,213,285,193
),
x2 = c(157,275,354,250,149,166,365,95,240,97,172,211,217,111,107,91,223,186,131,127,211,91,106,106,141,168,185,387,154,123),
x3 = c(38,40,38,31,35,40,38,43,38,49,43,47,33,28,42,24,34,41,49,33,27,39,36,36,28,38,28,22,39,42),
x4 = c(168,125,126,150,170,164,90,100,207,108,116,106,86,46,156,108,136,183,98,174,106,88,104,104,103,134,203,141,210,121),
x5 = c(1.1,1.22,0.9,1.02,1.26,1.3,1.33,1.24,1.14,1.49,1.25,1.22,1.1,0.71,0.95,0.67,1.05,1.22,1.18,0.78,0.85,0.94,0.87,0.87,0.81,0.88,0.75,0.8,1.17,1.12),
x6 = c(1.01,1.12,1.06,0.98,1.13,1.15,0.95,0.98,1.51,1.02,1.03,0.94,0.74,0.65,0.77,0.65,0.84,0.92,1.27,0.9,0.69,0.52,0.58,0.73,0.73,0.87,0.97,0.78,1.37,1)
)
classX2 <- data.frame(
x1 = c(174,106,173,178,198,180,134,204,168,219,189,180,177,172,166,210,166,223,136,156,201,134,195,262,194,165,183,200,171,222),
x2 = c(140,110,82,100,112,114,60,118,80,157,158,90,227,55,217,166,217,186,72,107,117,58,93,257,171,70,249,191,309,350),
x3 = c(47,52,53,43,53,48,36,63,52,28,43,59,75,51,33,42,33,73,67,45,45,60,51,62,42,36,44,58,52,13),
x4 = c(120,40,103,117,123,110,84,119,90,142,115,102,64,102,86,130,86,113,46,106,147,65,141,142,114,110,88,100,51,57),
x5 = c(0.84,1.08,0.97,0.98,0.98,1.02,0.98,1.02,1.07,1.02,0.92,1.32,1.4,1.31,1.1,1.28,1.1,1.62,1.45,0.93,1.06,1.03,1.22,1.56,1.11,1.22,1.12,1.61,1.37,0.36),
x6 = c(0.57,0.87,0.66,0.65,0.72,0.8,0.58,0.84,0.8,0.83,0.8,0.9,0.99,0.97,0.74,1.02,0.74,0.98,0.84,0.74,0.85,0.54,0.72,0.8,0.71,0.96,0.96,0.77,0.69,1.39)
)
# Load the Fisher discriminant function from the working directory.
source("discriminiant.fisher.R")
# Resubstitution: classify the training samples themselves.
discriminiant.fisher(classX1,classX2)
将训练样本回代进行判别,有10个点判错,分别是10、19、22、24、40、45、46、47、56、60号样本。
# Rough export of the classification results from the three examples
# above to CSV files.
# NOTE(review): TrnX1/TrnX2/TstX here are whatever those names last held
# in this script (the coronary train/test subsets), not the original
# matrices from the first example — confirm that is the intent.
write.csv(discriminiant.bayes(TrnX1, TrnX2,TstX,rate=16/15,var.equal=TRUE),"disease1.csv")
write.csv(distinguish.distance(X1,G1),"disease2.csv")
write.csv(discriminiant.fisher(classX1,classX2),"disease3.csv")
R里还可以用knn来进行判别。