Built-in corpora
import nltk
nltk.corpus.gutenberg.fileids() # all file ids in the Gutenberg corpus
emma = nltk.corpus.gutenberg.words('austen-emma.txt') # load one Gutenberg text as a word list
len(emma)
Or equivalently:
from nltk.corpus import gutenberg
gutenberg.fileids()
emma = gutenberg.words('austen-emma.txt')
Extracting text statistics from a corpus
for fileid in gutenberg.fileids():
    num_chars = len(gutenberg.raw(fileid))
    num_words = len(gutenberg.words(fileid))
    num_sents = len(gutenberg.sents(fileid))
    num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
    # avg word length, avg sentence length, lexical diversity score
    print(round(num_chars/num_words), round(num_words/num_sents), round(num_words/num_vocab), fileid)
Another example: the Brown corpus
from nltk.corpus import brown
brown.categories() # the genres the Brown corpus is divided into
brown.words(categories='news') # words from one category
brown.words(fileids=['cg22']) # words from one file
brown.sents(categories=['news', 'editorial', 'reviews']) # sentences from several categories
cfd = nltk.ConditionalFreqDist(
    (genre, word)
    for genre in brown.categories()
    for word in brown.words(categories=genre))
genres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']
modals = ['can', 'could', 'may', 'might', 'must', 'will']
cfd.tabulate(conditions=genres, samples=modals)
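The same distribution can also be graphed rather than tabulated; a minimal sketch, assuming matplotlib is installed (NLTK's plotting needs it) and that plot() accepts the same conditions/samples keywords as tabulate():
cfd.plot(conditions=genres, samples=modals)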
Basic corpus reader functions
#Example Description
fileids() #the files of the corpus
fileids([categories]) #the files of the corpus corresponding to these categories
categories() #the categories of the corpus
categories([fileids]) #the categories of the corpus corresponding to these files
raw() #the raw content of the corpus
raw(fileids=[f1,f2,f3]) #the raw content of the specified files
raw(categories=[c1,c2]) #the raw content of the specified categories
words() #the words of the whole corpus
words(fileids=[f1,f2,f3]) #the words of the specified fileids
words(categories=[c1,c2]) #the words of the specified categories
sents() #the sentences of the whole corpus
sents(fileids=[f1,f2,f3]) #the sentences of the specified fileids
sents(categories=[c1,c2]) #the sentences of the specified categories
abspath(fileid) #the location of the given file on disk
encoding(fileid) #the encoding of the file (if known)
open(fileid) #open a stream for reading the given corpus file
root #the path to the root of the locally installed corpus
readme() #the contents of the README file of the corpus
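A few of these functions in action on the Gutenberg corpus (a quick sketch; the exact path and README text depend on your local NLTK data installation):
gutenberg.raw('austen-emma.txt')[:200] # first 200 characters of the raw text
gutenberg.abspath('austen-emma.txt')   # where the file lives on disk
gutenberg.readme()                     # the corpus README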
Loading a local corpus
from nltk.corpus import PlaintextCorpusReader
corpus_root = '/usr/share/dict'
wordlists = PlaintextCorpusReader(corpus_root, '.*')
wordlists.fileids()
wordlists.words('connectives')
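If your own corpus is a directory of .txt files, the reader's file pattern can be narrowed accordingly; a sketch, where the path below is hypothetical:
corpus_root = '/path/to/my/corpus' # hypothetical directory of .txt files
my_corpus = PlaintextCorpusReader(corpus_root, r'.*\.txt')
my_corpus.fileids()
my_corpus.sents() # sentence splitting requires the punkt tokenizer models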
Conditional frequency distributions
A frequency distribution counts observable events, such as the occurrences of words in a text. A conditional frequency distribution pairs each event with a condition, so instead of processing a single sequence of words, we process a sequence of (condition, event) pairs:
genre_word = [(genre, word)
              for genre in ['news', 'romance']
              for word in brown.words(categories=genre)]
cfd = nltk.ConditionalFreqDist(genre_word)
cfd['romance'].most_common(20)
cfd['romance']['could']
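Conditional frequency distributions also underpin simple language models. As in the NLTK book, each word can be conditioned on the word before it by building the distribution from bigrams:
text = nltk.corpus.genesis.words('english-kjv.txt')
cfd_bigrams = nltk.ConditionalFreqDist(nltk.bigrams(text))
cfd_bigrams['living'].most_common() # words observed after 'living', with counts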
Lexical resources
Using a word list
def unusual_words(text):
    text_vocab = set(w.lower() for w in text if w.isalpha())
    english_vocab = set(w.lower() for w in nltk.corpus.words.words())
    unusual = text_vocab - english_vocab
    return sorted(unusual)
unusual_words(nltk.corpus.gutenberg.words('austen-sense.txt'))
Using the stopword list
from nltk.corpus import stopwords
stopwords.words('english')
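Stopword lists are typically used to filter words before counting; a small helper in the spirit of the NLTK book measures what fraction of a text is content-bearing:
def content_fraction(text):
    stopwords = nltk.corpus.stopwords.words('english')
    content = [w for w in text if w.lower() not in stopwords]
    return len(content) / len(text)
content_fraction(nltk.corpus.reuters.words()) # roughly 0.7 for the Reuters corpus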
Using the names word list
names = nltk.corpus.names
names.fileids() #['female.txt', 'male.txt']
male_names = names.words('male.txt')
female_names = names.words('female.txt')
[w for w in male_names if w in female_names] # names that appear in both lists
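The NLTK book goes on to show that male and female names differ systematically in their final letter; a conditional frequency distribution over last characters makes this visible (plotting requires matplotlib):
cfd = nltk.ConditionalFreqDist(
    (fileid, name[-1])
    for fileid in names.fileids()
    for name in names.words(fileid))
cfd.plot()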
Using the pronouncing dictionary
entries = nltk.corpus.cmudict.entries()
for entry in entries[42371:42379]:
    print(entry)
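Each entry pairs a word with its list of phonemes; for direct lookup, the dictionary form is more convenient:
prondict = nltk.corpus.cmudict.dict()
prondict['fire'] # [['F', 'AY1', 'ER0'], ['F', 'AY1', 'R']]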
Using WordNet
from nltk.corpus import wordnet as wn
wn.synsets('motorcar') #[Synset('car.n.01')]
wn.synset('car.n.01').lemma_names() #['car', 'auto', 'automobile', 'machine', 'motorcar']
wn.synset('car.n.01').definition() #'a motor vehicle with four wheels; usually propelled by an internal combustion engine'
wn.synset('car.n.01').examples() #['he needs a car to get to work']
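Synsets sit in a concept hierarchy; hyponyms and hypernyms navigate down and up it:
motorcar = wn.synset('car.n.01')
motorcar.hyponyms()  # more specific concepts: ambulance, cab, limousine, ...
motorcar.hypernyms() # [Synset('motor_vehicle.n.01')]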