资源描述:
《挖掘高频词R语言程序.doc》由会员上传分享,免费在线阅读,更多相关内容在教育资源-天天文库。
# High-frequency word mining and word-cloud visualization for Chinese text.
# Pipeline: load corpus -> clean -> segment (Rwordseg::segmentCN) -> remove
# stop words -> build a document-term matrix (tm) -> plot word cloud.
# NOTE(review): requires third-party packages (Rwordseg/rJava/tm/...) and the
# input files referenced below; adjust the paths before running.

# Load required packages. library() (not require()) so a missing hard
# dependency fails loudly instead of returning FALSE.
library(Rwordseg)
library(rJava)
library(tm)
library(slam)
library(topicmodels)
library(RColorBrewer)
library(wordcloud)
library(igraph)
library(grDevices)

loadDict()

# Working directory holding the input CSV and the stop-word list.
# The original used single backslashes ("C:\Users..."), which is a string
# escape error in R; forward slashes are valid on Windows too.
# Per the original author, the path must not contain Chinese characters.
setwd("C:/Users/ununu/Desktop/111")

txt <- read.csv("5kaiyue.csv", colClasses = "character")

# ---- Clean the raw sentences ------------------------------------------------
# Strip URLs. The original pattern used "\/" and "\." (invalid R string
# escapes); inside a character class neither character needs escaping.
data1 <- gsub("http:[a-zA-Z/.0-9]+", "", txt$sentence)
# Remove ASCII letters, digits and underscores.
data1 <- gsub("[a-z0-9A-Z_]", "", data1)
# Remove whitespace. The original chained gsub("", "", ...) (a no-op) with
# gsub(" ", "", ...); [[:space:]] also drops tabs/newlines, which is
# presumably what was intended -- TODO confirm.
data1 <- gsub("[[:space:]]", "", data1)
head(data1)
length(data1)

# ---- Segmentation -----------------------------------------------------------
# Domain words registered so segmentCN keeps them as single tokens
# (unique() removes the duplicated entry in the original list).
insertWords(unique(c(
  "限牌", "中签", "胡冰蜀黍", "买车", "中签率", "买车", "拉风", "杯具",
  "吐槽", "放牌", "谈资", "摇中", "摇号", "公信力", "辟谣", "裸奔",
  "限行", "车源", "攒钱", "指点", "高冷", "央视", "限号", "囤牌",
  "竞价", "上牌", "猛戳", "东问西问", "错峰", "治堵", "限堵"
)))

# Segment each cleaned sentence; seq_along() is safe for empty input,
# unlike 1:length().
poem_words <- lapply(seq_along(data1),
                     function(i) segmentCN(data1[i], nature = TRUE))

# ---- Stop-word removal ------------------------------------------------------
data_stw <- readLines("C:/Users/ununu/Desktop/111/中文停用词库.txt")

# Drop every token found in `words`, preserving order and duplicates.
# Vectorized equivalent of the original index-by-index while loop that
# grew the result with c() (O(n^2)).
removeStopWords <- function(x, words) {
  x[!x %in% words]
}
sample.words <- lapply(poem_words, removeStopWords, data_stw)

# ---- Document-term matrix ---------------------------------------------------
wordcorpus <- Corpus(VectorSource(sample.words))
length(wordcorpus)

Sys.setlocale(locale = "Chinese")
dtm1 <- DocumentTermMatrix(
  wordcorpus,
  control = list(
    wordLengths   = c(2, 2),                  # keep two-character terms only
    bounds        = list(global = c(2, Inf)), # term must occur in >= 2 docs
    removeNumbers = TRUE,
    weighting     = weightTf,
    encoding      = "UTF-8"
  )
)
colnames(dtm1)

# Inspect terms appearing at least 600 times.
findFreqTerms(dtm1, 600)

# ---- Frequency table and word cloud -----------------------------------------
m <- as.matrix(dtm1)
v <- sort(colSums(m), decreasing = TRUE)
write.csv(v, file = "1.txt", row.names = TRUE)

myNames <- names(v)
d <- data.frame(word = myNames, freq = v)

# Render the word cloud to a high-resolution PNG in the working directory.
par(mar = rep(2, 4))
png(paste(getwd(), "/词云5", ".png", sep = ""),
    width = 20, height = 10, units = "in", res = 300)
pal2 <- brewer.pal(8, "Dark2")
wordcloud(d$word, d$freq,
          scale        = c(5, .2),
          min.freq     = mean(d$freq),
          max.words    = 1000,
          random.order = FALSE,
          rot.per      = .15,
          colors       = pal2)
dev.off()