IMDB Sentiment Analysis

from keras import layers
from keras import Sequential
from keras import activations
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras import callbacks

from gensim.models import Word2Vec
from gensim import corpora
from gensim.utils import simple_preprocess

import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
# Read a single review file into a plain-text string
def gen_corpus(corpus):
    with corpus.open("r", encoding="utf-8") as f:
        return f.read()
# Load the dataset
file_path = Path("./aclImdb/")
def load_data(target="train", /):
    assert target.lower() in ["train", "test"], f"Target must be either 'train' or 'test', got: {target}"
    categories = ["pos", "neg"]
    target_path = file_path.joinpath(target)
    labels = np.asarray([])
    corpus = []
    for category in categories:
        label_path = target_path.joinpath(category)
        all_txt_files = label_path.glob("*.txt")
        
        plain_texts = map(gen_corpus, all_txt_files)
        temp_corpus = [simple_preprocess(plain_text) for plain_text in plain_texts]
        corpus += temp_corpus
        
        # Append labels in the same order as the texts so that corpus[i] matches labels[i]
        if category == 'neg':
            labels = np.append(labels, np.zeros((len(temp_corpus), )))
        else:
            labels = np.append(labels, np.ones((len(temp_corpus), )))
    return corpus, labels
# Helper that builds and fits the tokenizer
def build_tokenizer(num_words, corpus, /, **kwargs):
    tokenizer = Tokenizer(num_words=num_words, **kwargs)
    tokenizer.fit_on_texts(corpus)
    return tokenizer
corpus, labels = load_data()
# Maximum vocabulary size, maximum sequence length, and embedding dimension per word
max_words = 15000
max_len = 150
vocab_size = 64
# Build the tokenizer, which maps each word to an integer index
tokenizer = build_tokenizer(max_words, corpus)
x_train = tokenizer.texts_to_sequences(corpus)
x_train = pad_sequences(x_train, maxlen=max_len)
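A quick sanity check of what these two steps produce (an illustrative snippet added here, not part of the original notebook; the printed indices depend on the fitted tokenizer):

demo = tokenizer.texts_to_sequences([["this", "movie", "was", "great"]])
print(demo)                                        # e.g. [[11, 17, 13, 84]] -- actual indices depend on the corpus
print(pad_sequences(demo, maxlen=max_len).shape)   # (1, 150): zero-padded on the left to max_len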

Building a baseline model

The baseline model uses the plainest recurrent setup, an embedding layer followed by a single LSTM layer, training an embedding from scratch for this sentiment-analysis task.

# Shuffle the data
indicies = np.arange(0, len(x_train))
np.random.shuffle(indicies)
x_train = x_train[indicies]
labels = labels[indicies]
# Baseline model
model = Sequential()
model.add(layers.Embedding(input_dim=max_words, output_dim=vocab_size, input_length=max_len))
model.add(layers.LSTM(32))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

ht = model.fit(x_train, labels, epochs=5, validation_split=0.1, batch_size=64, shuffle=True)
Epoch 1/5
352/352 [==============================] - 14s 38ms/step - loss: 0.5669 - accuracy: 0.6860 - val_loss: 0.3256 - val_accuracy: 0.8660
Epoch 2/5
352/352 [==============================] - 13s 36ms/step - loss: 0.2467 - accuracy: 0.9106 - val_loss: 0.3164 - val_accuracy: 0.8776
Epoch 3/5
352/352 [==============================] - 13s 36ms/step - loss: 0.1638 - accuracy: 0.9451 - val_loss: 0.3459 - val_accuracy: 0.8648
Epoch 4/5
352/352 [==============================] - 13s 36ms/step - loss: 0.1195 - accuracy: 0.9628 - val_loss: 0.3904 - val_accuracy: 0.8588
Epoch 5/5
352/352 [==============================] - 13s 36ms/step - loss: 0.0974 - accuracy: 0.9692 - val_loss: 0.4907 - val_accuracy: 0.8528
hd = ht.history
accuracy = hd["accuracy"]
val_accuracy = hd["val_accuracy"]

loss = hd["loss"]
val_loss = hd["val_loss"]

epochs = range(1, len(loss) + 1)
plt.plot(epochs, accuracy, label="Training Accuracy")
plt.plot(epochs, val_accuracy, label="Validation Accuracy")
plt.title("Training and Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
[Figure: Training and Validation Accuracy]
plt.plot(epochs, loss, label="Training Loss")
plt.plot(epochs, val_loss, label="Validation Loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
[Figure: Training and Validation Loss]

Model analysis

The model starts to overfit within the first couple of epochs, and training is fairly slow.
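One way to pin down where overfitting begins (a small illustrative check, not in the original notebook) is the epoch with the lowest validation loss; beyond it, validation loss rises while training loss keeps falling:

best_epoch = int(np.argmin(hd["val_loss"])) + 1    # epoch numbering starts at 1
print(best_epoch, hd["val_loss"][best_epoch - 1])  # epoch 2, val_loss about 0.32 for the run above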

# Generate word2vec word vectors using the skip-gram model.
word2vec_model = Word2Vec(corpus, sg=1, vector_size=vocab_size)
# Initialize the embedding-layer weight matrix
embedd_matrix = np.zeros((max_words, vocab_size))
# Copy the trained word2vec vectors into the embedding matrix.
word_index = tokenizer.word_index
default_values = np.zeros((vocab_size, ))
wv = word2vec_model.wv
for word, index in word_index.items():
    if index < max_words:
        # key_to_index is a dict, so membership testing is O(1)
        if word in wv.key_to_index:
            embedd_matrix[index] = wv[word]
        else:
            embedd_matrix[index] = default_values
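A small sanity check on the matrix just built (an added illustration; it assumes the word "movie" appears often enough to be in both vocabularies, which holds for IMDB):

idx = word_index["movie"]
if idx < max_words and "movie" in wv.key_to_index:
    print(np.allclose(embedd_matrix[idx], wv["movie"]))   # True: row idx holds the word2vec vector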
# Create a Keras embedding layer that uses the word2vec vectors and is frozen (not trainable)
embedding_layer = layers.Embedding(input_dim=max_words, output_dim=vocab_size, input_length=max_len, weights=[embedd_matrix], trainable=False)
# word2vec model: identical to the baseline except that the embedding weights come from word2vec.
model = Sequential()
model.add(embedding_layer)
model.add(layers.LSTM(32))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=["accuracy"])
word2vec_ht = model.fit(x_train, labels, epochs=5, validation_split=0.1, batch_size=64)
Epoch 1/5
352/352 [==============================] - 12s 29ms/step - loss: 0.6166 - accuracy: 0.6650 - val_loss: 0.4878 - val_accuracy: 0.7816
Epoch 2/5
352/352 [==============================] - 10s 29ms/step - loss: 0.4850 - accuracy: 0.7769 - val_loss: 0.4613 - val_accuracy: 0.7888
Epoch 3/5
352/352 [==============================] - 10s 29ms/step - loss: 0.4302 - accuracy: 0.8027 - val_loss: 0.4132 - val_accuracy: 0.8192
Epoch 4/5
352/352 [==============================] - 10s 29ms/step - loss: 0.4151 - accuracy: 0.8128 - val_loss: 0.4043 - val_accuracy: 0.8192
Epoch 5/5
352/352 [==============================] - 11s 30ms/step - loss: 0.3895 - accuracy: 0.8288 - val_loss: 0.3794 - val_accuracy: 0.8356
word2vec_hd = word2vec_ht.history
accuracy = word2vec_hd["accuracy"]
val_accuracy = word2vec_hd["val_accuracy"]

loss = word2vec_hd["loss"]
val_loss = word2vec_hd["val_loss"]

epochs = range(1, len(loss) + 1)
plt.plot(epochs, accuracy, label="Training Accuracy")
plt.plot(epochs, val_accuracy, label="Validation Accuracy")
plt.title("Training and Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
[Figure: Training and Validation Accuracy]
plt.plot(epochs, loss, label="Training Loss")
plt.plot(epochs, val_loss, label="Validation Loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
[Figure: Training and Validation Loss]

Model analysis

With every other hyperparameter unchanged, switching to the word2vec pre-trained vectors improves the model markedly: the frozen embedding makes training much faster, the curves show no sign of overfitting yet, and there is clearly room for further gains. This shows how important word2vec is for NLP.

Optimizing the word2vec pre-trained model

  • First, train the model with larger LSTM layers and a larger fully connected network.
# New model: use the word2vec embedding layer and keep it frozen.
model = Sequential()
model.add(embedding_layer)
model.add(layers.LSTM(32, return_sequences=True))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.LSTM(64))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dense(64))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dense(128))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
ht = model.fit(x_train, labels, epochs=40, batch_size=64, validation_split=0.1)
Epoch 1/40
352/352 [==============================] - 31s 81ms/step - loss: 0.5901 - accuracy: 0.6595 - val_loss: 0.4299 - val_accuracy: 0.8120
...
Epoch 36/40
352/352 [==============================] - 32s 90ms/step - loss: 0.1512 - accuracy: 0.9451 - val_loss: 0.3723 - val_accuracy: 0.8728
Epoch 37/40
352/352 [==============================] - 31s 87ms/step - loss: 0.1548 - accuracy: 0.9416 - val_loss: 0.3959 - val_accuracy: 0.8564
Epoch 38/40
352/352 [==============================] - 31s 87ms/step - loss: 0.1449 - accuracy: 0.9475 - val_loss: 0.3884 - val_accuracy: 0.8760
Epoch 39/40
352/352 [==============================] - 31s 88ms/step - loss: 0.1361 - accuracy: 0.9494 - val_loss: 0.3712 - val_accuracy: 0.8776
Epoch 40/40
352/352 [==============================] - 31s 89ms/step - loss: 0.1398 - accuracy: 0.9498 - val_loss: 0.4240 - val_accuracy: 0.8660
word2vec_hd = ht.history
accuracy = word2vec_hd["accuracy"]
val_accuracy = word2vec_hd["val_accuracy"]

loss = word2vec_hd["loss"]
val_loss = word2vec_hd["val_loss"]

epochs = range(1, len(loss) + 1)
plt.plot(epochs, accuracy, label="Training Accuracy")
plt.plot(epochs, val_accuracy, label="Validation Accuracy")
plt.title("Training and Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
[Figure: Training and Validation Accuracy]
plt.plot(epochs, loss, label="Training Loss")
plt.plot(epochs, val_loss, label="Validation Loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
[Figure: Training and Validation Loss]

Model analysis

Even with this many parameters, the word2vec pre-trained model only begins to overfit around epoch 20, and it still performs far better than the baseline.

Tackling overfitting

Where overfitting comes from

  • High variance in the data: a sample may correlate poorly with the target, yet the strongly correlated samples happen to dominate training. This can be ruled out here.
  • Too many parameters: to be determined.
  • A lot of noise in the training process: can be ruled out here.
  • Excessive network capacity.
  • No penalty term on the weight coefficients.
  • Not enough data.

Remedies

  • Add dropout.
  • Reduce network capacity.
  • Add an L2 penalty (see the note just below).
  • Gather more data.
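The models below use the string shortcut kernel_regularizer="l2", which applies Keras's default L2 factor of 0.01. To control the strength explicitly, pass a regularizer object instead; the 1e-3 value here is only illustrative:

from keras import regularizers
l2_reg = regularizers.l2(1e-3)
# e.g. layers.LSTM(32, return_sequences=True, kernel_regularizer=l2_reg)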
import random
# Sample a random 10% subset of the training data (used by the OptimizerChanger sketch below)
random_indicies = random.sample(range(0, len(x_train)), int(len(x_train) * 0.1))
part_x_train = x_train[random_indicies]
part_y_labels = labels[random_indicies]
class OptimizerChanger(callbacks.EarlyStopping):
    """Early-stopping callback that, once training stops, recompiles the model
    with SGD and fine-tunes it for a few extra epochs on the data it was given."""

    def __init__(self, x_train, y_train, **kwargs):
        super(OptimizerChanger, self).__init__(**kwargs)
        self.x_train = x_train
        self.y_train = y_train

    def on_train_end(self, logs=None):
        self.model.compile(optimizer="SGD", loss="binary_crossentropy", metrics=["accuracy"])
        self.model.fit(self.x_train, self.y_train, epochs=10, batch_size=128, validation_split=0.1, shuffle=True)
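OptimizerChanger is defined here but not used in the runs below; a hypothetical way to wire it in would be (the monitor and patience values are illustrative):

# changer = OptimizerChanger(part_x_train, part_y_labels,
#                            monitor="val_loss", patience=10, restore_best_weights=True)
# model.fit(x_train, labels, epochs=150, batch_size=128,
#           validation_split=0.1, callbacks=[changer])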
        
        
from keras.optimizers import Adam

# Weight initializer suited to sigmoid-like activations
from keras.initializers import glorot_normal
# He initializer (Gaussian), suited to ReLU-family activations
from keras.initializers import he_normal
# Same architecture as before, with dropout and other measures added against overfitting
model = Sequential()
model.add(embedding_layer)
# model.add(layers.Flatten())
model.add(layers.LSTM(32, return_sequences=True, recurrent_dropout=0.5, kernel_regularizer="l2"))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dropout(rate=0.3))

model.add(layers.LSTM(64, recurrent_dropout=0.5, kernel_regularizer="l2"))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dropout(rate=0.3))

model.add(layers.Dense(64, kernel_initializer=he_normal()))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dropout(rate=0.3))

model.add(layers.Dense(128, kernel_initializer=he_normal()))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dropout(rate=0.3))

model.add(layers.Dense(256, kernel_initializer=he_normal()))
model.add(layers.LeakyReLU(alpha=0.01))
model.add(layers.Dropout(rate=0.3))

model.add(layers.Dense(1, activation="sigmoid", kernel_initializer=glorot_normal()))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
cbs = [
    # Save the model with the best validation loss seen so far
    callbacks.ModelCheckpoint("./word2vec_best_model_d.h5", save_best_only=True),
    # Halve the learning rate when the monitored metric plateaus for 5 epochs
    callbacks.ReduceLROnPlateau(factor=0.5, patience=5, verbose=1, cooldown=10, min_lr=5e-4),
    # Stop after 20 epochs without improvement and restore the best weights
    callbacks.EarlyStopping(patience=20, restore_best_weights=True)
]
ht = model.fit(x_train, labels, epochs=150, batch_size=128, validation_split=0.1, callbacks=cbs, shuffle=True)

Epoch 1/150
176/176 [==============================] - 43s 230ms/step - loss: 1.4252 - accuracy: 0.5733 - val_loss: 0.6511 - val_accuracy: 0.7548
...
Epoch 122/150
176/176 [==============================] - 41s 234ms/step - loss: 0.3413 - accuracy: 0.8689 - val_loss: 0.3448 - val_accuracy: 0.8684

Epoch 00122: ReduceLROnPlateau reducing learning rate to 0.0005.
Epoch 123/150
176/176 [==============================] - 41s 231ms/step - loss: 0.3336 - accuracy: 0.8756 - val_loss: 0.3434 - val_accuracy: 0.8700
word2vec_hd = ht.history
accuracy = word2vec_hd["accuracy"]
val_accuracy = word2vec_hd["val_accuracy"]

loss = word2vec_hd["loss"]
val_loss = word2vec_hd["val_loss"]

epochs = range(1, len(loss) + 1)
plt.plot(epochs, accuracy, label="Training Accuracy")
plt.plot(epochs, val_accuracy, label="Validation Accuracy")
plt.title("Training and Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
[Figure: Training and Validation Accuracy]
plt.plot(epochs, loss, label="Training Loss")
plt.plot(epochs, val_loss, label="Validation Loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
[Figure: Training and Validation Loss]

Training log

  1. LSTM: (32, 64), kernel_regularizer=l2, dropout=0.3
    Dense: (64, 128, 256), dropout=0.3
    Epochs: 20
    Result: no overfitting, but the model did not fully converge; once regularization is added, the number of training epochs needs to be increased accordingly.

  2. LSTM: (32, 64), kernel_regularizer=l2, dropout=0.3
    Dense: (64, 128, 256), dropout=0.3
    Epochs: 40
    Result: overfitting eventually sets in after epoch 35; a big improvement over the unregularized model.

  3. LSTM: (32, 64), kernel_regularizer=l2, dropout=0.3, recurrent_dropout=0.5
    Dense: (64, 128, 256), dropout=0.3
    Epochs: 100
    Result: overfitting starts after epoch 67 and validation accuracy stops improving; final test accuracy is about 86%.

  4. LSTM: (32, 64), kernel_regularizer=l2, dropout=0.3, recurrent_dropout=0.5
    Dense: (64, 128, 256), dropout=0.3
    Epochs: 150
    Result: the model does not overfit; early stopping and best-model checkpointing are used.

from keras.models import load_model
text_corpus, y_test = load_data("test")
x_test = tokenizer.texts_to_sequences(text_corpus)
x_test = pad_sequences(x_test, maxlen=max_len)
test_indicies = np.arange(0, len(x_test))
np.random.shuffle(test_indicies)
x_test, y_test = x_test[test_indicies], y_test[test_indicies]
model = load_model("./word2vec_best_model.h5")
model.evaluate(x_test, y_test)
WARNING:tensorflow:Model was constructed with shape (None, 100) for input KerasTensor(type_spec=TensorSpec(shape=(None, 100), dtype=tf.float32, name='embedding_1_input'), name='embedding_1_input', description="created by layer 'embedding_1_input'"), but it was called on an input with incompatible shape (None, 150).
782/782 [==============================] - 24s 30ms/step - loss: 0.2957 - accuracy: 0.8863
[0.29574865102767944, 0.8862800002098083]
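With the tokenizer and the loaded model in hand, a brand-new review can be scored end to end (an illustrative example; the review text and the 0.5 decision threshold are assumptions, and under the labelling above 1 means positive):

review = "one of the best films i have seen this year, the acting was wonderful"
seq = tokenizer.texts_to_sequences([simple_preprocess(review)])
prob = model.predict(pad_sequences(seq, maxlen=max_len))[0, 0]
print("positive" if prob > 0.5 else "negative", prob)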

1D CNN for NLP

At heart, a convolutional network recognizes spatial patterns. It is translation-invariant: a pattern learned in one place can be recognized anywhere else.

Together with the dimensionality reduction provided by pooling layers, this helps guard against overfitting.

Layers deeper in the network have greater representational power: early layers learn local patterns, and later layers compose them into more abstract representations.
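The translation property can be checked with a tiny example (an added illustration, not part of the original notebook): shifting a pattern in the input shifts the Conv1D feature map by the same amount, which is exactly why a pattern learned at one position is detected at any other.

conv = layers.Conv1D(filters=1, kernel_size=3, use_bias=False)
toy = np.zeros((1, 10, 1), dtype="float32")
toy[0, 2:5, 0] = 1.0                         # a small "pattern" starting at position 2
toy_shifted = np.roll(toy, 3, axis=1)        # the same pattern starting at position 5
out = conv(toy).numpy()[0, :, 0]
out_shifted = conv(toy_shifted).numpy()[0, :, 0]
print(np.allclose(np.roll(out, 3), out_shifted))   # True: the response simply moves over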

# Train a 1D CNN to tackle the same problem
cnn_model = Sequential()
cnn_model.add(embedding_layer)

# 128 one-dimensional kernels of length 7 are convolved with the word vectors, producing 128 feature maps, each of size 150-7+1=144.
# An L2 penalty on the weights helps avoid overfitting; a BN layer follows, so the bias term is unnecessary.
cnn_model.add(layers.Conv1D(128, 7, kernel_regularizer='l2', activation='relu', use_bias=False))
# Batch-normalize the first conv layer's output before passing it to the next layer
cnn_model.add(layers.BatchNormalization())

cnn_model.add(layers.Conv1D(128, 7, kernel_regularizer='l2', activation='relu', use_bias=False))
cnn_model.add(layers.BatchNormalization())
cnn_model.add(layers.MaxPool1D(3))
cnn_model.add(layers.BatchNormalization())

cnn_model.add(layers.Conv1D(64, 7, kernel_regularizer='l2', activation='relu', use_bias=False))
cnn_model.add(layers.BatchNormalization())

cnn_model.add(layers.Conv1D(64, 5, kernel_regularizer='l2', activation='relu', use_bias=False))
cnn_model.add(layers.BatchNormalization())
cnn_model.add(layers.MaxPool1D(3))
cnn_model.add(layers.BatchNormalization())

# cnn_model.add(layers.LSTM(32, dropout=0.5, recurrent_dropout=0.5, return_sequences=True, kernel_regularizer='l2',
#                           kernel_initializer=glorot_normal(), recurrent_regularizer='l2'))
cnn_model.add(layers.LSTM(64, dropout=0.5, recurrent_dropout=0.5, kernel_regularizer='l2',
                          kernel_initializer=glorot_normal(), recurrent_regularizer='l2'))
cnn_model.add(layers.BatchNormalization())

cnn_model.add(layers.Dense(128, activation="relu", use_bias=False))
cnn_model.add(layers.Dropout(0.5))
cnn_model.add(layers.BatchNormalization())

cnn_model.add(layers.Dense(1, activation="sigmoid"))
cnn_model.compile(loss="binary_crossentropy", optimizer=Adam(learning_rate=0.01), metrics=["accuracy"])
cnn_cbs = [
    callbacks.ReduceLROnPlateau(factor=0.5, patience=5, verbose=1, cooldown=3, min_lr=1e-4),
    callbacks.ModelCheckpoint("./cnn_word2vec.h5", save_best_only=True),
    callbacks.EarlyStopping(patience=15, )
]
cnn_ht = cnn_model.fit(x_train, labels, epochs=150, batch_size=128, validation_split=0.1, shuffle=True, callbacks=cnn_cbs)

Epoch 1/150
176/176 [==============================] - 34s 169ms/step - loss: 3.6083 - accuracy: 0.7270 - val_loss: 0.9785 - val_accuracy: 0.4988
Epoch 2/150
176/176 [==============================] - 31s 174ms/step - loss: 0.7969 - accuracy: 0.7855 - val_loss: 1.1814 - val_accuracy: 0.5484
...
Epoch 57/150
176/176 [==============================] - 30s 169ms/step - loss: 0.2950 - accuracy: 0.8834 - val_loss: 0.3810 - val_accuracy: 0.8432
Epoch 58/150
176/176 [==============================] - 30s 170ms/step - loss: 0.2856 - accuracy: 0.8887 - val_loss: 0.4759 - val_accuracy: 0.8128
cnn_hd = cnn_ht.history
accuracy = cnn_hd["accuracy"]
val_accuracy = cnn_hd["val_accuracy"]

loss = cnn_hd["loss"]
val_loss = cnn_hd["val_loss"]

epochs = range(1, len(loss) + 1)
plt.plot(epochs, accuracy, label="Training Accuracy")
plt.plot(epochs, val_accuracy, label="Validation Accuracy")
plt.title("Training and Validation Accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
[Figure: Training and Validation Accuracy]
plt.plot(epochs, loss, label="Training Loss")
plt.plot(epochs, val_loss, label="Validation Loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
[Figure: Training and Validation Loss]
model = load_model("./cnn_word2vec.h5")
model.evaluate(x_test, y_test)
782/782 [==============================] - 9s 11ms/step - loss: 0.3546 - accuracy: 0.8535
[0.3546295762062073, 0.8535199761390686]

Conclusion

A 1D CNN can also make predictions on sequences. Since a CNN recognizes spatial patterns, it works well when the exact ordering of the sequence is not critical; once ordering matters it is no longer appropriate, for example in regression problems such as predicting tomorrow's weather from historical weather data, where the order of the observations is essential.

In the end, the CNN finishes the task at about 85% accuracy, slightly behind the RNN. A likely reason is that many reviews contain a turn of sentiment, such as praise followed by criticism: the CNN cannot capture a pivot word like "but", and mostly judges whether a review is positive or negative from characteristic words such as "good", "bad", or "awesome".

What the BN layers contribute

  • A larger learning rate can be used.
  • BN's normalization removes the constant (mean) component, so a bias term is no longer needed (see the sketch after this list).
  • The network becomes less sensitive to weight initialization.
  • Deeper networks can be trained.
  • It has a regularizing effect, but it is not a substitute for L2 or dropout.
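The second point is easy to verify numerically: batch normalization subtracts the per-feature mean, so any constant bias added before it cancels out. A minimal numpy sketch (an added illustration, not from the original notebook):

rng = np.random.default_rng(0)
batch = rng.normal(size=(8, 4))              # toy batch: 8 samples, 4 features
bias = 3.7                                   # a constant offset, playing the role of a bias term

def simple_batch_norm(z, eps=1e-5):
    return (z - z.mean(axis=0)) / np.sqrt(z.var(axis=0) + eps)

print(np.allclose(simple_batch_norm(batch), simple_batch_norm(batch + bias)))   # True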
