R Implementations of the Top Ten Data Mining Algorithms

The iris dataset
The iris dataset records measurements of iris flowers and is commonly used for classification tasks. It contains 150 samples, 50 from each of three species. One of the species is linearly separable from the other two, while those two are not linearly separable from each other.

library(ggplot2)
summary(iris)
qplot(Petal.Length, Petal.Width, data=iris, color=Species)

1: C5.0 Decision Tree
First, load the required packages

library(C50)
library(printr)

Sample the iris dataset to obtain training and test sets

train.indeces <- sample(1:nrow(iris), 100)  # randomly pick 100 rows for training
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]         # the remaining 50 rows form the test set

Train a model on the training set with the C5.0 function

model <- C5.0(Species ~ ., data = iris.train)

Predict on the test set

results <- predict(object = model, newdata = iris.test, type = "class")
confusion_matrix <- table(results, iris.test$Species)
confusion_matrix

Compute the error rate

error <- 1 - sum(diag(confusion_matrix)) / nrow(iris.test)

The prediction error rate here is 0.12 (it depends on the random train/test split).
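
Since the split is random, the error rate will differ from run to run. A minimal sketch of a reproducible version of the same C5.0 workflow, assuming only that a seed is set before sampling (the seed value is arbitrary):

set.seed(42)                                    # arbitrary seed, for reproducibility only
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
model <- C5.0(Species ~ ., data = iris.train)
results <- predict(model, newdata = iris.test, type = "class")
mean(results != iris.test$Species)              # error rate, same as 1 - accuracy
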
2: K-means
Build the model

library(stats)
library(printr)
model <- kmeans(x = subset(iris, select = -Species), centers = 3)

Compare the clusters against the true species labels

table(model$cluster, iris$Species)

    setosa versicolor virginica
  1     33          0         0
  2     17          4         0
  3      0         46        50
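
To summarize the agreement between the clusters and the species in a single number, one common choice is the adjusted Rand index. A short sketch assuming the mclust package (used again in the EM section below) is installed:

library(mclust)                                 # provides adjustedRandIndex()
adjustedRandIndex(model$cluster, iris$Species)  # 1 = perfect agreement, near 0 = random
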
3: Support Vector Machines
Load the packages

library(e1071)
library(printr)

Sample the iris dataset to obtain training and test sets


train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]

Train a model on the training set with the svm function

model <- svm(Species ~ ., data = iris.train)

Predict on the test set

results <- predict(object = model, newdata = iris.test, type = "class")
confusion_matrix <- table(results, iris.test$Species)
confusion_matrix

results      setosa versicolor virginica
  setosa         12          0         0
  versicolor      0         19         0
  virginica       0          1        18

Compute the error rate

error <- 1 - sum(diag(confusion_matrix)) / nrow(iris.test)

The prediction error rate here is 0.02.
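
The svm call above uses the default radial kernel with default cost and gamma. As a sketch, e1071's tune.svm can grid-search those hyperparameters by cross-validation; the grid values below are arbitrary examples:

tuned <- tune.svm(Species ~ ., data = iris.train,
                  gamma = 10^(-3:0), cost = 10^(0:2))   # arbitrary example grid
summary(tuned)            # cross-validation error for each parameter combination
tuned$best.parameters     # the best (gamma, cost) pair found
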
4: Apriori
Load the package and the dataset
library(arules)
library(printr)
data("Adult")
Mine the association rules
rules <- apriori(Adult,
    parameter = list(support = 0.4, confidence = 0.7),
    appearance = list(rhs = c("race=White", "sex=Male"), default = "lhs"))
Get the top five rules by lift
rules.sorted <- sort(rules, by = "lift")
top5.rules <- head(rules.sorted, 5)
as(top5.rules, "data.frame")

   rules                                                                     support confidence     lift
2  {relationship=Husband} => {sex=Male}                                    0.4036485  0.9999493 1.495851
12 {marital-status=Married-civ-spouse,relationship=Husband} => {sex=Male}  0.4034028  0.9999492 1.495851
3  {marital-status=Married-civ-spouse} => {sex=Male}                       0.4074157  0.8891818 1.330151
4  {marital-status=Married-civ-spouse} => {race=White}                     0.4105892  0.8961080 1.048027
19 {workclass=Private,native-country=United-States} => {race=White}        0.5433848  0.8804113 1.029669
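
Support, confidence, and lift are the quality measures reported above; lift > 1 means the right-hand side occurs together with the left-hand side more often than would be expected by chance. A brief sketch of inspecting and filtering the mined rules with standard arules functions (the 1.3 lift cutoff is an arbitrary example):

inspect(top5.rules)                                 # pretty-print rules with their quality measures
strong.rules <- subset(rules, subset = lift > 1.3)  # keep only rules with lift above the cutoff
inspect(head(sort(strong.rules, by = "confidence"), 3))
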
5: EM Algorithm
library(mclust)
library(printr)
model <- Mclust(subset(iris, select = -Species))
table(model$classification, iris$Species)
    setosa versicolor virginica
  1     50          0         0
  2      0         50        50
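
By default Mclust selects the number of mixture components by BIC, which here merges versicolor and virginica into a single component. A small sketch of forcing exactly three components instead, using mclust's documented G argument:

model3 <- Mclust(subset(iris, select = -Species), G = 3)   # fit a 3-component Gaussian mixture
table(model3$classification, iris$Species)                 # compare components with true species
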
6: PageRank
PageRank measures the relative importance of the nodes in a graph; it is based on a Markov chain (a random walk over the graph).
library(igraph)
library(dplyr)
library(printr)
Generate a random directed graph
g <- random.graph.game(n = 10, p.or.m = 1/4, directed = TRUE)
plot(g)

Compute the PageRank of each node

pr <- page.rank(g)$vector
df <- data.frame(Object = 1:10, PageRank = pr)
arrange(df, desc(PageRank))
Object  PageRank
    10 0.1768655
     7 0.1369388
     1 0.1263876
     4 0.1198167
     2 0.1161824
     9 0.0891266
     6 0.0847579
     8 0.0793286
     5 0.0390147
     3 0.0315813
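
To make the Markov-chain view concrete, here is a minimal power-iteration sketch with a damping factor of 0.85 (igraph's default). It is an illustrative approximation of what page.rank computes, not igraph's actual implementation:

A <- get.adjacency(g, sparse = FALSE)       # adjacency matrix: A[i, j] = 1 if edge i -> j
n <- nrow(A)
out.deg <- rowSums(A)
P <- A / ifelse(out.deg == 0, 1, out.deg)   # row-stochastic random-walk transition matrix
P[out.deg == 0, ] <- 1 / n                  # dangling nodes jump uniformly at random
d <- 0.85                                   # damping factor
pr.manual <- rep(1 / n, n)                  # start from the uniform distribution
for (i in 1:100) {
  pr.manual <- d * as.vector(t(P) %*% pr.manual) + (1 - d) / n
}
round(pr.manual, 4)                         # compare with round(page.rank(g)$vector, 4)
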
7: AdaBoost
library(adabag)
library(printr)
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train the model
model <- boosting(Species ~ ., data = iris.train)
Predict on the test set
results <- predict(object = model, newdata = iris.test, type = "class")
results$confusion

               Observed Class
Predicted Class setosa versicolor virginica
     setosa         15          0         0
     versicolor      0         18         4
     virginica       0          0        13
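
predict.boosting also reports the test error directly, and the fitted ensemble carries a variable-importance measure; a short sketch:

results$error       # misclassification rate on iris.test
model$importance    # relative contribution of each predictor across the boosted trees
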
8: kNN
library(class)
library(printr)
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Classify the test set (knn predicts directly from the training samples; there is no separate model object)
results <- knn(train = subset(iris.train, select = -Species),
    test = subset(iris.test, select = -Species),
    cl = iris.train$Species)
Classification results
table(results, iris.test$Species)

results      setosa versicolor virginica
  setosa         22          0         0
  versicolor      0         10         0
  virginica       0          1        17
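
The call above uses the default of k = 1 nearest neighbor. As a sketch, k can be varied and the resulting error compared (k = 5 below is an arbitrary choice):

results5 <- knn(train = subset(iris.train, select = -Species),
                test = subset(iris.test, select = -Species),
                cl = iris.train$Species, k = 5)   # use the 5 nearest neighbors
mean(results5 != iris.test$Species)               # test error rate with k = 5
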
9: Naive Bayes
library(e1071)
library(printr)
train.indeces <- sample(1:nrow(iris), 100)
iris.train <- iris[train.indeces, ]
iris.test <- iris[-train.indeces, ]
Train the model on the training set
model <- naiveBayes(x = subset(iris.train, select=-Species), y = iris.train$Species)
Predict on the test set
results <- predict(object = model, newdata = iris.test, type = "class")
table(results, iris.test$Species)

results      setosa versicolor virginica
  setosa         18          0         0
  versicolor      0         17         0
  virginica       0          4        11
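
Besides hard class labels, e1071's naive Bayes predictor can return the class posterior probabilities; a brief sketch:

posteriors <- predict(object = model, newdata = iris.test, type = "raw")   # per-class probabilities
head(round(posteriors, 3))                                                 # first few test samples
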


10: CART
library(rpart) 
library(printr) 
train.indeces <- sample(1:nrow(iris), 100) 
iris.train <- iris[train.indeces, ] 
iris.test <- iris[-train.indeces, ] 
Train the model
model <- rpart(Species ~ ., data = iris.train) 
Evaluate the model on the test set
results <- predict(object = model, newdata = iris.test, type = "class")
table(results, iris.test$Species)

results      setosa versicolor virginica
  setosa         15          0         0
  versicolor      0         16         6
  virginica       0          1        12
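
CART trees are easy to visualize; a minimal sketch using the base plotting methods that ship with rpart:

plot(model, margin = 0.1)   # draw the fitted tree (plot.rpart); margin leaves room for labels
text(model, use.n = TRUE)   # label splits and leaves (text.rpart); use.n shows class counts
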


Source: http://blog.csdn.net/cmddds11235/article/details/47724871
