Text preprocessing with torchtext

Study link for torchtext text preprocessing

# -*- coding: utf-8 -*-
# @Time : 2020/2/25 11:18
# @Author : liusen
from torchtext import data
from tqdm import tqdm
import pandas as pd
import numpy as np
import random
import torch
from torchtext.vocab import Vectors
import os
import dill

def split_data_train_dev():
    # placeholder: splitting the raw data into train/dev sets is not implemented here
    pass


def x_tokenize(x):
    # Character-level tokenizer.
    # If the loaded text has already been converted to ids, the strings
    # must be cast to integers here; otherwise use_vocab must be set to True.
    return list(x.strip())


# print(x_tokenize("我是中国人"))  # -> ['我', '是', '中', '国', '人']

train_path = '../data/train.csv'
test_path = '../data/test_new.csv'
TEXT = data.Field(sequential=True, tokenize=x_tokenize, use_vocab=True, lower=False)
LABEL = data.Field(sequential=False, use_vocab=False)


def get_one_hot(label, N):
    # size = list(label.size())
    # label = label.view(-1)  # reshape to a vector
    ones = torch.eye(N)  # row i of the identity matrix is the one-hot vector for class i
    ones = ones.index_select(0, torch.tensor([int(label)]))  # index must be a 1-D tensor
    # size.append(N)  # append the class count to size, ready to reshape back to the original shape
    return ones
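# Usage example (a sketch): get_one_hot(2, 4) -> tensor([[0., 0., 1., 0.]]).
# torch.nn.functional.one_hot offers a similar conversion for integer label tensors.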



class MyDataset(data.Dataset):

    def __init__(self, path, text_field, label_field, test=False, aug=False, **kwargs):
        fields = [("id", None), ("comment", text_field), ("label", label_field)]
        examples = []
        csv_data = pd.read_csv(path)
        print('read data from {}'.format(path))
        # try:
        if test:
            for comment in tqdm(csv_data['comment']):
                examples.append(data.Example.fromlist([None, comment, 1], fields))
        else:
            for comment, label in tqdm(zip(csv_data['comment'], csv_data['label'])):
                if aug:
                    rate = random.random()
                    if rate > 0.5:
                        comment = self.dropout(comment)
                    else:
                        comment = self.shuffle(comment)
                # NaN never equals itself, so `label == np.nan` was always False
                if pd.isna(label):
                    print("error")
                examples.append(data.Example.fromlist([None, comment, label], fields))
        # except:
        # 	pass
        super(MyDataset, self).__init__(examples, fields, **kwargs)

    def shuffle(self, text):
        # randomly permute the whitespace-separated tokens
        text = np.random.permutation(text.strip().split())
        return ' '.join(text)

    def dropout(self, text, p=0.5):
        # replace roughly a fraction p of the tokens with empty strings
        text = text.strip().split()
        len_ = len(text)
        # replace=False avoids sampling the same index twice
        indexs = np.random.choice(len_, int(len_ * p), replace=False)
        for i in indexs:
            text[i] = ''
        return ' '.join(text)
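
    # Augmentation example (a sketch with made-up text): for "a b c d",
    # shuffle() might return "c a d b", while dropout(p=0.5) blanks roughly
    # half of the tokens, e.g. "a  c ".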



train = MyDataset(train_path, text_field=TEXT, label_field=LABEL, test=False, aug=False)
test = MyDataset(test_path, text_field=TEXT, label_field=LABEL, test=True, aug=False)
# TEXT.build_vocab(train, vectors=GloVe(name='6B', dim=300))
if not os.path.exists('.vector_cache'):
    os.mkdir('.vector_cache')
vectors = Vectors(name='../data/glove.6B/glove.6B.300d.txt')
# TEXT.build_vocab(train, vectors=vectors)
# LABEL.build_vocab(train)
snli_text_vocab_path = "../data/snli_text_vocab_path"
snli_label_vocab_path = "../data/snli_label_vocab_path"
if os.path.exists(snli_text_vocab_path) and os.path.exists(snli_label_vocab_path):
    print('Loading previously built vocabulary...')
    with open(snli_text_vocab_path, 'rb') as f:
        TEXT.vocab = dill.load(f)
    with open(snli_label_vocab_path, 'rb') as f:
        LABEL.vocab = dill.load(f)
else:
    print('No local vocabulary found, building a new one...')
    TEXT.build_vocab(train, vectors=vectors)
    LABEL.build_vocab(train)
    with open(snli_text_vocab_path, 'wb') as f:
        dill.dump(TEXT.vocab, f)
    with open(snli_label_vocab_path, 'wb') as f:
        dill.dump(LABEL.vocab, f)
train_iter = data.BucketIterator(train, batch_size=1, shuffle=True)
test_iter = data.BucketIterator(test, batch_size=1, shuffle=False)
vocab = TEXT.vocab
# try:
#     for batch_idx, batch in enumerate(train_iter):
#         print("batch_idx", batch_idx)
#         print("label", batch.label.numpy())
#         print("comment", batch.comment)  # attribute names follow the fields defined above
# except:
#     pass
# print(TEXT.vocab)
# print(TEXT.tokenize)
# print(TEXT.vocab.freqs.most_common(10))

1. Key point: define Fields. torchtext describes every data format through a Field.

TEXT = data.Field(sequential=True, tokenize=x_tokenize, use_vocab=True, lower=False)
The format of the text data.
LABEL = data.Field(sequential=False, use_vocab=False)
The format of the label data.
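
For a quick check of what these Fields do, the legacy torchtext 0.x API used above can preprocess a raw string directly (a minimal sketch):

tokens = TEXT.preprocess("我是中国人")
print(tokens)  # ['我', '是', '中', '国', '人'], character-level tokens from x_tokenize
# LABEL has use_vocab=False, so label values pass through unchanged and must already be numeric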

2. Reading the whole dataset

fields = [("id", None), ("comment", text_field), ("label", label_field)]
examples.append(data.Example.fromlist([None, comment, 1], fields))
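
A single CSV row becomes an Example like this (a minimal sketch with a made-up comment string):

ex = data.Example.fromlist([None, "我是中国人", 1], fields)
print(ex.comment)  # ['我', '是', '中', '国', '人']; the id column is dropped because its field is None
print(ex.label)    # 1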

3. Using the downloaded GloVe vectors

if not os.path.exists('.vector_cache'):
    os.mkdir('.vector_cache')
vectors = Vectors(name='../data/glove.6B/glove.6B.300d.txt')
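
Before building the vocabulary, the loaded vectors can be sanity-checked (a sketch; dim and token lookup are attributes of the legacy Vectors class):

print(vectors.dim)           # 300
print(vectors["the"].shape)  # torch.Size([300]); tokens missing from GloVe map to zero vectors by default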

4. Saving and loading the vocabulary

snli_text_vocab_path = "../data/snli_text_vocab_path"
snli_label_vocab_path = "../data/snli_label_vocab_path"
if os.path.exists(snli_text_vocab_path) and os.path.exists(snli_label_vocab_path):
    print('Loading previously built vocabulary...')
    with open(snli_text_vocab_path, 'rb') as f:
        TEXT.vocab = dill.load(f)
    with open(snli_label_vocab_path, 'rb') as f:
        LABEL.vocab = dill.load(f)
else:
    print('No local vocabulary found, building a new one...')
    TEXT.build_vocab(train, vectors=vectors)
    LABEL.build_vocab(train)
    with open(snli_text_vocab_path, 'wb') as f:
        dill.dump(TEXT.vocab, f)
    with open(snli_label_vocab_path, 'wb') as f:
        dill.dump(LABEL.vocab, f)
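
The round-trip can be verified by reloading the saved vocabulary and comparing the token-to-id mappings (a minimal sketch):

with open(snli_text_vocab_path, 'rb') as f:
    reloaded_vocab = dill.load(f)
assert reloaded_vocab.stoi == TEXT.vocab.stoi  # same token-to-id mapping as before saving
print(len(reloaded_vocab))  # vocabulary size, including the <unk> and <pad> specials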

5. Mapping the vocabulary onto the pretrained embeddings to build the embedding lookup. Here weight_matrix would be TEXT.vocab.vectors, which build_vocab aligned with the vocabulary ids.

self.word_embedding = nn.Embedding(len(TEXT.vocab), 300)
self.word_embedding.weight.data.copy_(weight_matrix)
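
Putting it together, a minimal embedding module could look like this (a sketch: the class name TextEmbedder is made up, and weight_matrix is TEXT.vocab.vectors):

import torch.nn as nn

class TextEmbedder(nn.Module):  # hypothetical module for illustration
    def __init__(self, vocab):
        super().__init__()
        self.word_embedding = nn.Embedding(len(vocab), 300)
        # copy the pretrained GloVe rows; tokens not found in GloVe keep zero vectors
        self.word_embedding.weight.data.copy_(vocab.vectors)

embedder = TextEmbedder(TEXT.vocab)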
