# 2 获得文本语料和词汇资源 (Chapter 2: Accessing Text Corpora and Lexical Resources)

# NOTE(review): the original import lines had all whitespace stripped
# (e.g. "importnltk", "from__future__importdivision"), which is not valid
# import syntax — "from__future__importdivision" parses as a bare identifier
# and raises NameError. Restored with proper spacing, one import per line,
# grouped and sorted per PEP 8. Every originally-imported name is kept.
from __future__ import division

import nltk
from nltk.corpus import (
    brown,
    gutenberg,
    inaugural,
    names,
    state_union,
    stopwords,
    swadesh,
)
from nltk.corpus import wordnet as wn
from nltk.probability import FreqDist

#from nltk.book import *

# Exercise 4

# print(state_union.fileids())

# cfd=nltk.ConditionalFreqDist((words, fileid[:4])

#                              for fileid in state_union.fileids()

#                              for words in state_union.words(fileid)

#                              )

# words=['men','women','people']

# print(cfd.plot(conditions=words))

# Exercise 8

# cfd=nltk.ConditionalFreqDist((fileid,name.lower()[0])

#                              for fileid in names.fileids()

#                              for name in names.words(fileid))

#

# print(cfd.plot())

# Exercise 15

# fd=nltk.FreqDist(brown.words())

# more_than_three_times=[word for word in fd if fd[word]>=3]

# Exercise 16

# def word_diversity(words):

#    num_word=len(words)

#    num_vocab=len(set([word.lower()for word in words]))

#    word_diversity=int(num_word/num_vocab)

#    return word_diversity

#

# def word_diversity_categories():

#    for categorie in brown.categories():

#        word_diversity_categories = word_diversity(brown.words(categories=categorie))

#        print(categorie,':', word_diversity_categories)

#

# word_diversity_categories()

# Exercise 17

# stopwords=stopwords.words('english')

# def frequentest_words(text):

#    words=[word for word in text if word.lower() not in stopwords and word.isalpha()]

#    cfd=nltk.FreqDist(words)

#    frequentest_words = sorted(cfd.items(),key=lambda item:item[1], reverse=True)

#    print (frequentest_words[:50])

#

# frequentest_words(gutenberg.words('austen-emma.txt'))

# Exercise 19

# cfd=nltk.ConditionalFreqDist((genre,word)

#                              for genre in brown.categories()

#                              for word in brown.words(categories=genre))

# for genre in brown.categories():

#    fdist=cfd[genre]

#    sorted_words=sorted(fdist.keys(),key=lambda x:fdist[x],reverse=True)

#    print (type(fdist), genre, sorted_words[:10], sorted_words[-10::])

# 你可能感兴趣的:(2 获得文本语料和词汇资源)  <- blog-footer artifact, kept as a comment