# Global knitr chunk options: echo = TRUE shows all code in the rendered document.
knitr::opts_chunk$set(echo = TRUE)
利用R做简单的词云图
当文本很大时,需要用专门的模型进行分词,可以使用Rwordseg包里的分词函数;这里只做一个简单的词云图,假设已经完成分词并统计好词频,根据词频为图中各词的大小赋权!
1. 先用wordcloud做一个静态的词云图:
## Static word cloud with the wordcloud package.
## Note: brewer.pal() comes from RColorBrewer, which is attached automatically
## as a dependency when wordcloud is loaded.
library(wordcloud) # static word cloud
library(grDevices) # color utilities (rgb)
## Chinese/English terms to display
v <- c("董肖凯","STATA","SPSS","R","AMOS","Eviews","数据挖掘","机器学习","分类算法","聚类","回归","logist","probit","时间序列模型","面板数据模型","双重差分模型","结构方程模型","Python")
## Raw counts, converted to relative frequencies (weights for word size)
f <- c(20, 10, 20, 40, 20, 15, 30, 30, 25, 25, 25, 20, 10, 30, 15, 10, 15, 30)
p <- f / sum(f)
## Assemble words and weights as a data frame
mydata1 <- data.frame(word = v, frep = p)
## Color palette; alternatives: "Dark2", "Accent", "Reds", "Blues"
pal <- brewer.pal(9, "Pastel1")
## Background color in RGB; smaller component values give a darker background.
## Spell out maxColorValue — the original relied on partial matching ("max=").
background <- rgb(5, 5, 20, maxColorValue = 255)
## Draw. Use TRUE/FALSE, never T/F (T and F are ordinary variables and
## can be reassigned).
par(bg = background)
wordcloud(mydata1$word, mydata1$frep, random.order = FALSE, colors = pal,
          rot.per = 0.1, fixed.asp = TRUE)
2. 用wordcloud2做一个交互式的动态词云图
## Interactive word cloud rendered as an HTML widget by wordcloud2.
library(wordcloud2)
## wordcloud2 expects a two-column data frame: words first, weights second.
## Here raw counts (f) are used directly as weights.
mydata2 <- data.frame(word = as.factor(v), frep = f)
wordcloud2(
  mydata2,
  backgroundColor = "slateblue4",
  size = 0.6,
  minRotation = -pi / 2, # fixed rotation: every word vertical
  maxRotation = -pi / 2,
  color = "random-light",
  shape = "cardioid"
)