NLP in practice: sentiment analysis on the IMDB dataset

Contents

    • Required imports
    • Loading the data
      • Loading stop words
    • Data processing
      • Checking the total vocabulary size
      • Word frequency statistics
      • word2tag
      • Splitting the training and validation sets
    • Model definition
      • Parameter definition
      • Defining the model
    • Training the model
    • Testing
    • Complete code

Required imports

The Python environment is:
python== 3.8
tensorflow-macos== 2.10.0
keras== 2.10.0
nltk== 3.7

import json
import os
import nltk
from nltk.corpus import stopwords

from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
from keras import optimizers

This post was written following the video at https://www.bilibili.com/video/BV17A411e7qL?p=2&vd_source=16b6cb21545bb7c517e562893dd6a8a3

Loading the data

IMDB dataset download link 1: http://ai.stanford.edu/~amaas/data/sentiment/
IMDB dataset download link 2: https://pan.baidu.com/s/1W-cQbDtlA6XywfPeTDvYIg?pwd=audr (extraction code: audr)
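If you download the archive from the first link (assuming the usual file name aclImdb_v1.tar.gz), it can be unpacked directly from Python. The paths below simply mirror the ones used in get_data further down and should be adjusted to your own layout:

import tarfile

# Hypothetical paths: adjust to wherever the archive was actually saved
with tarfile.open('../datasets/IMDB/aclImdb_v1.tar.gz') as tar:
    tar.extractall('../datasets/IMDB')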

def get_data(path):
    # Load the reviews, truncating or padding each one to exactly 30 tokens
    data = []
    file_names = os.listdir(path)
    for name in file_names:
        with open(f'{path}/{name}', 'r') as f:
            voc_li = f.read().split(' ')
            if len(voc_li) > 30:
                voc_li = voc_li[:30]
            if len(voc_li) < 30:
                voc_li.extend([None] * (30 - len(voc_li)))
            data.append(voc_li)
    return data
  
pos_data = get_data('../datasets/IMDB/aclImdb/train/pos')
neg_data = get_data('../datasets/IMDB/aclImdb/train/neg')
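As a quick sanity check (the standard aclImdb layout contains 12500 reviews per class in train/pos and train/neg), you can print the sizes and peek at one sample:

print(len(pos_data), len(neg_data))  # expected: 12500 12500
print(pos_data[0][:10])              # first 10 tokens of the first positive review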

Loading stop words

nltk.download('stopwords')
stop_word_list = ['very', 'ourselves', 'am', 'doesn', 'through', 'me', 'against', 'up', 'just', 'her', 'ours',
                  'couldn', 'because', 'is', 'isn', 'it', 'only', 'in', 'such', 'too', 'mustn', 'under', 'their',
                  'if', 'to', 'my', 'himself', 'after', 'why', 'while', 'can', 'each', 'itself', 'his', 'all', 'once',
                  'herself', 'more', 'our', 'they', 'hasn', 'on', 'ma', 'them', 'its', 'where', 'did', 'll', 'you',
                  'didn', 'nor', 'as', 'now', 'before', 'those', 'yours', 'from', 'who', 'was', 'm', 'been', 'will',
                  'into', 'same', 'how', 'some', 'of', 'out', 'with', 's', 'being', 't', 'mightn', 'she', 'again', 'be',
                  'by', 'shan', 'have', 'yourselves', 'needn', 'and', 'are', 'o', 'these', 'further', 'most',
                  'yourself', ',', ',', '.', '。', '!', '~', '`', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '+',
                  '=', '-',
                  'having', 'aren', 'here', 'he', 'were', 'but', 'this', 'myself', 'own', 'we', 'so', 'i', 'does',
                  'both',
                  'when', 'between', 'd', 'had', 'the', 'y', 'has', 'down', 'off', 'than', 'haven', 'whom', 'wouldn',
                  'should', 've', 'over', 'themselves', 'few', 'then', 'hadn', 'what', 'until', 'won', 'no', 'about',
                  'any', 'that', 'for', 'shouldn', 'don', 'do', 'there', 'doing', 'an', 'or', 'ain', 'hers', 'wasn',
                  'weren', 'above', 'a', 'at', 'your', 'theirs', 'below', 'other', 'not', 're', 'him', 'during',
                  'which']

Data processing

Checking the total vocabulary size

Without deduplication, the length should in theory be 25000 * 30 = 750000 (12500 positive + 12500 negative reviews, 30 tokens each).

total_vocabulary = [*merge_data(pos_data), *merge_data(neg_data)]
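The merge_data helper used above is defined in the complete code at the end of this post; a copy is included here so the snippet can run on its own:

def merge_data(data):
    # Flatten the sentences into one list to make it easy to count the total number of words
    word = []
    for w_list in data:
        word.extend(w_list)
    return word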

Word frequency statistics

stop_word_list = [*stop_word_list, *stopwords.words('english'), '/>, ', '--', '\x96']
word_statistic = dict()
for voc in total_vocabulary:
    if not voc or voc.lower() in stop_word_list:
        continue
    key = voc.lower()
    if key not in word_statistic:
        word_statistic[key] = 1
    else:
        word_statistic[key] += 1
word_statistic = sorted(word_statistic.items(), key=lambda x: x[1], reverse=True)
print(f'There are {len(word_statistic)} unique words in total')
voc_tag_map = {None: 0}
for index, data in enumerate(word_statistic):
    voc_tag_map[data[0]] = index + 1

word2tag

pos = sentences2tag(pos_data, voc_tag_map)
neg = sentences2tag(neg_data, voc_tag_map)
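sentences2tag is likewise defined in the complete code below; it replaces each word with its integer tag and maps padding or unknown tokens to 0. For reference:

def sentences2tag(sentences, tag):
    # Convert sentences into integer tags for model training
    ret = []
    for sentence in sentences:
        temp = []
        for word in sentence:
            if word and word.lower() in tag:
                temp.append(tag[word.lower()])
            else:
                temp.append(0)
        ret.append(temp)
    return ret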

Splitting the training and validation sets

x = [*pos, *neg]
y = [*[0] * len(pos), *[1] * len(neg)]
x_train = [x[w] for w in range(len(x))[::2]]
y_train = [y[w] for w in range(len(y))[::2]]
x_valid = [x[w] for w in range(len(x))[1::2]]
y_valid = [y[w] for w in range(len(y))[1::2]]
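Keras can usually convert nested Python lists on its own, but converting to NumPy arrays up front is safer and faster; a minimal optional step (assuming numpy is available) might look like this:

import numpy as np

x_train, y_train = np.array(x_train), np.array(y_train)
x_valid, y_valid = np.array(x_valid), np.array(y_valid)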

Model definition

Parameter definition

embedding_dim = 8
vocabulary = 10000
word_num = 30
epoch = 50
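Note that voc_tag_map can easily contain more than vocabulary (10000) distinct words, while the Embedding layer only accepts indices in the range [0, vocabulary - 1]; larger tags will typically raise an error at training time. A minimal, hypothetical sketch of one way to handle this is to map any out-of-range tag back to 0:

def cap_tags(sentences, max_index):
    # Optional helper: replace any tag >= max_index with 0 so it fits the Embedding layer's input_dim
    return [[t if t < max_index else 0 for t in sentence] for sentence in sentences]

x_train = cap_tags(x_train, vocabulary)
x_valid = cap_tags(x_valid, vocabulary)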

Defining the model

model = Sequential()
model.add(Embedding(vocabulary, embedding_dim, input_length=word_num))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))

After defining the model, call model.summary() to inspect the overall architecture.
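With the parameters above, the summary should look roughly like this (10000 × 8 = 80000 parameters in the Embedding layer, 30 × 8 = 240 flattened features, 240 + 1 = 241 parameters in the Dense layer):

Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 embedding (Embedding)       (None, 30, 8)             80000

 flatten (Flatten)           (None, 240)               0

 dense (Dense)               (None, 1)                 241

=================================================================
Total params: 80,241
Trainable params: 80,241
Non-trainable params: 0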

Training the model

model.compile(optimizer=optimizers.RMSprop(learning_rate=0.0001),
              loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=epoch,
                    batch_size=32, validation_data=(x_valid, y_valid))
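The returned history object records the metrics configured above (loss, acc, val_loss, val_acc), so the training curves can be plotted with a few lines of matplotlib (assuming it is installed):

import matplotlib.pyplot as plt

plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='valid acc')
plt.xlabel('epoch')
plt.legend()
plt.show()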

Testing

x_test and y_test would be built the same way as the training data; I did not bother preparing a separate test set, so the training data is evaluated here instead.

loss_and_acc = model.evaluate(x_train, y_train)
print('loss = ' + str(loss_and_acc[0]))
print('acc = ' + str(loss_and_acc[1]))

Complete code

import json
import os
import nltk
from nltk.corpus import stopwords

from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding
from keras import optimizers

# Load stop words
# nltk.download('stopwords')

# Stop word list
stop_word_list = ['very', 'ourselves', 'am', 'doesn', 'through', 'me', 'against', 'up', 'just', 'her', 'ours',
                  'couldn', 'because', 'is', 'isn', 'it', 'only', 'in', 'such', 'too', 'mustn', 'under', 'their',
                  'if', 'to', 'my', 'himself', 'after', 'why', 'while', 'can', 'each', 'itself', 'his', 'all', 'once',
                  'herself', 'more', 'our', 'they', 'hasn', 'on', 'ma', 'them', 'its', 'where', 'did', 'll', 'you',
                  'didn', 'nor', 'as', 'now', 'before', 'those', 'yours', 'from', 'who', 'was', 'm', 'been', 'will',
                  'into', 'same', 'how', 'some', 'of', 'out', 'with', 's', 'being', 't', 'mightn', 'she', 'again', 'be',
                  'by', 'shan', 'have', 'yourselves', 'needn', 'and', 'are', 'o', 'these', 'further', 'most',
                  'yourself', ',', ',', '.', '。', '!', '~', '`', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '+',
                  '=', '-',
                  'having', 'aren', 'here', 'he', 'were', 'but', 'this', 'myself', 'own', 'we', 'so', 'i', 'does',
                  'both',
                  'when', 'between', 'd', 'had', 'the', 'y', 'has', 'down', 'off', 'than', 'haven', 'whom', 'wouldn',
                  'should', 've', 'over', 'themselves', 'few', 'then', 'hadn', 'what', 'until', 'won', 'no', 'about',
                  'any', 'that', 'for', 'shouldn', 'don', 'do', 'there', 'doing', 'an', 'or', 'ain', 'hers', 'wasn',
                  'weren', 'above', 'a', 'at', 'your', 'theirs', 'below', 'other', 'not', 're', 'him', 'during',
                  'which']


def get_data(path):
    # Load the reviews, truncating or padding each one to exactly 30 tokens
    data = []
    file_names = os.listdir(path)
    for name in file_names:
        with open(f'{path}/{name}', 'r') as f:
            voc_li = f.read().split(' ')
            if len(voc_li) > 30:
                voc_li = voc_li[:30]
            if len(voc_li) < 30:
                voc_li.extend([None] * (30 - len(voc_li)))
            data.append(voc_li)
    return data


def merge_data(data):
    # Flatten the sentences into one list to make it easy to count the total number of words
    word = []
    for w_list in data:
        word.extend(w_list)
    return word


def save_data(data, path):
    # Save the processed data
    with open(path, 'w') as f:
        f.write(json.dumps(data))


def load_data(path):
    # Load the processed data
    with open(path, 'r') as f:
        data = f.read()
    return json.loads(data)


def sentences2tag(sentences, tag):
    # Convert sentences into integer tags for model training
    ret = []
    for sentence in sentences:
        temp = []
        for word in sentence:
            if word and word.lower() in tag:
                temp.append(tag[word.lower()])
            else:
                temp.append(0)
        ret.append(temp)
    return ret

pos_data = get_data('../datasets/IMDB/aclImdb/train/pos')
neg_data = get_data('../datasets/IMDB/aclImdb/train/neg')

# Process the data
pos_data_ = merge_data(pos_data)
neg_data_ = merge_data(neg_data)

# # Save the data
# save_data(pos_data_, '../datasets/IMDB/aclImdb/train/struct_pos_data.json')
# save_data(neg_data_, '../datasets/IMDB/aclImdb/train/struct_neg_data.json')
#
# # Load the data
# pos_data = load_data('../datasets/IMDB/aclImdb/train/struct_pos_data.json')
# neg_data = load_data('../datasets/IMDB/aclImdb/train/struct_neg_data.json')

# Check the total vocabulary size; without deduplication the theoretical length is 25000 * 30 = 750000
total_vocabulary = [*merge_data(pos_data), *merge_data(neg_data)]

# Word frequency statistics
stop_word_list = [*stop_word_list, *stopwords.words('english'), '/>, ', '--', '\x96']
word_statistic = dict()
for voc in total_vocabulary:
    if not voc or voc.lower() in stop_word_list:
        continue
    key = voc.lower()
    if key not in word_statistic:
        word_statistic[key] = 1
    else:
        word_statistic[key] += 1
word_statistic = sorted(word_statistic.items(), key=lambda x: x[1], reverse=True)
print(f'There are {len(word_statistic)} unique words in total')
voc_tag_map = {None: 0}
for index, data in enumerate(word_statistic):
    voc_tag_map[data[0]] = index + 1

# word 2 tag
pos = sentences2tag(pos_data, voc_tag_map)
neg = sentences2tag(neg_data, voc_tag_map)

# Split the training and validation sets
x = [*pos, *neg]
y = [*[0] * len(pos), *[1] * len(neg)]
x_train = [x[w] for w in range(len(x))[::2]]
y_train = [y[w] for w in range(len(y))[::2]]
x_valid = [x[w] for w in range(len(x))[1::2]]
y_valid = [y[w] for w in range(len(y))[1::2]]


# Model parameters
embedding_dim = 8
vocabulary = 10000
word_num = 30
epoch = 50

# Define the model
model = Sequential()
model.add(Embedding(vocabulary, embedding_dim, input_length=word_num))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))

# Inspect the model
model.summary()

# Train
model.compile(optimizer=optimizers.RMSprop(learning_rate=0.0001),
              loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=epoch,
                    batch_size=32, validation_data=(x_valid, y_valid))

# loss_and_acc = model.evaluate(x_train, y_train)
# print('loss = ' + str(loss_and_acc[0]))
# print('acc = ' + str(loss_and_acc[1]))

Video reference: https://www.bilibili.com/video/BV17A411e7qL?p=2&vd_source=16b6cb21545bb7c517e562893dd6a8a3
