pip install transformers
from transformers import BertTokenizer
# Method 1: download the vocabulary from the Hugging Face hub
tokenizer = BertTokenizer.from_pretrained(
    pretrained_model_name_or_path='bert-base-chinese',
    cache_dir=None,
    force_download=False,
)
# Method 2: load from a local vocabulary file
tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese/BertTokenizer/vocab.txt')
# Get the vocabulary and its size
dictionary = tokenizer.get_vocab()
len(dictionary)
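The vocabulary maps tokens to integer ids, and the tokenizer exposes the conversion in both directions. A minimal check:

# Tokenize a string, map the tokens to ids, and map the ids back to tokens
tokens = tokenizer.tokenize('自然语言处理')
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens)
print(ids)
print(tokenizer.convert_ids_to_tokens(ids))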
# Example sentences (kept in Chinese, since the tokenizer is bert-base-chinese)
sents = [
    '自然语言处理',
    '第三方工具包',
    '开发的应用在青少年中颇受欢迎'
]
# Encode a pair of sentences
out = tokenizer.encode(
    text=sents[0],
    text_pair=sents[1],
    # Truncate when the sequence is longer than max_length
    truncation=True,
    # Always pad up to max_length
    padding='max_length',
    add_special_tokens=True,
    max_length=30,
    return_tensors=None,
)
print(out)
tokenizer.decode(out)
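decode reproduces the text together with the special tokens ([CLS], [SEP], and the [PAD] padding); pass skip_special_tokens=True to drop them:

tokenizer.decode(out, skip_special_tokens=True)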
out = tokenizer.encode_plus(
    text=sents[0],
    text_pair=sents[1],
    truncation=True,
    padding='max_length',
    max_length=30,
    add_special_tokens=True,
    # Can be 'tf', 'pt' or 'np'; the default None returns Python lists
    return_tensors=None,
    # Return token_type_ids (segment ids)
    return_token_type_ids=True,
    # Return the attention_mask
    return_attention_mask=True,
    # Return special_tokens_mask, which marks the special symbols
    return_special_tokens_mask=True,
    # return_offsets_mapping marks the start/end position of every token,
    # but it is only supported by BertTokenizerFast
    # return_offsets_mapping=True,
    # Return the sequence length
    return_length=True,
)
for k, v in out.items():
    print(k, ":", v)
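encode_plus returns a dict-like BatchEncoding. With return_tensors='pt' the values become PyTorch tensors and can later be keyword-unpacked straight into a model call (model(**inputs)); a small sketch:

# Same encoding, but returned as PyTorch tensors
inputs = tokenizer.encode_plus(
    text=sents[0],
    text_pair=sents[1],
    truncation=True,
    padding='max_length',
    max_length=30,
    return_tensors='pt',
)
print({k: v.shape for k, v in inputs.items()})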
# Batch-encode sentence pairs
out = tokenizer.batch_encode_plus(
    batch_text_or_text_pairs=[(sents[0], sents[1]), (sents[0], sents[1])],
    truncation=True,
    padding='max_length',
    max_length=15,
    add_special_tokens=True,
    # Can be 'tf', 'pt' or 'np'; the default None returns Python lists
    return_tensors=None,
    # Return token_type_ids (segment ids)
    return_token_type_ids=True,
    # Return the attention_mask
    return_attention_mask=True,
    # Return special_tokens_mask, which marks the special symbols
    return_special_tokens_mask=True,
    # return_offsets_mapping marks the start/end position of every token,
    # but it is only supported by BertTokenizerFast
    # return_offsets_mapping=True,
    # Return the sequence length
    return_length=True,
)
for k, v in out.items():
    print(k, ":", v)
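batch_encode_plus also accepts a flat list of single sentences instead of pairs, which is the form the collate function further down relies on:

# Batch-encode single sentences (no pairs)
out = tokenizer.batch_encode_plus(
    batch_text_or_text_pairs=sents,
    truncation=True,
    padding='max_length',
    max_length=15,
)
print(out['input_ids'])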
dictionary = tokenizer.get_vocab()
type(dictionary), len(dictionary), '月光' in dictionary
# Add new words to the vocabulary
tokenizer.add_tokens(new_tokens=['月光', '希望'])
# Add a new special token
tokenizer.add_special_tokens({'eos_token': '[EOS]'})
dictionary = tokenizer.get_vocab()
type(dictionary), len(dictionary), '月光' in dictionary
out = tokenizer.encode(
    text='月光的新希望[EOS]',
    text_pair=None,
    # Truncate when the sequence is longer than max_length
    truncation=True,
    # Always pad up to max_length
    padding='max_length',
    add_special_tokens=True,
    max_length=8,
    return_tensors=None,
)
print(out)
tokenizer.decode(out)
from datasets import load_dataset
# Load the training split from a local dataset directory
dataset = load_dataset(path='../datasets', split='train')
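It can be worth inspecting the loaded split before wrapping it; the Dataset class further down assumes each record has a 'text' field:

print(len(dataset))
print(dataset[0])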
from transformers import BertModel
# Method 1: download from the Hugging Face hub
pretrained = BertModel.from_pretrained(
    pretrained_model_name_or_path='bert-base-chinese',
)
# Method 2: load locally (pytorch_model.bin, config.json)
pretrained = BertModel.from_pretrained('./bert-base-chinese/BertModel')
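Two practical notes, sketched under this section's assumptions (the paths are illustrative): the local directory used by method 2 can be created once from the hub download with save_pretrained, and because new tokens were added to the tokenizer above, the model's embedding matrix has to be resized to the new vocabulary size before those ids can be used.

# Save the downloaded weights/vocabulary once to create the local copies
pretrained.save_pretrained('./bert-base-chinese/BertModel')
tokenizer.save_pretrained('./bert-base-chinese/BertTokenizer')

# The tokenizer gained new tokens above, so grow the token embedding matrix to match
pretrained.resize_token_embeddings(len(tokenizer))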
# The pretrained weights are not trained further, so no gradients are needed
for param in pretrained.parameters():
    param.requires_grad_(False)
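The forward pass below expects an input_ids / attention_mask / token_type_ids batch and a config object, neither of which has been defined so far. A minimal, hypothetical sketch of both follows; every name and value here is an assumption made only to keep the snippet runnable, so adapt it to your project:

class config:
    device = 'cpu'  # switch to 'cuda' when a GPU is available
    max_length = 128
    train_datasets_path = '../datasets'
    test_datasets_path = '../datasets'
    train_split = 'train'
    test_split = 'test'
    train_batch_size = 32
    test_batch_size = 32
    num_workers = 2

pretrained.to(config.device)

# Encode a small batch so the forward pass below has tensors to consume
data = tokenizer.batch_encode_plus(
    batch_text_or_text_pairs=sents,
    truncation=True,
    padding='max_length',
    max_length=config.max_length,
    return_tensors='pt',
)
input_ids = data['input_ids']
attention_mask = data['attention_mask']
token_type_ids = data['token_type_ids']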
out = pretrained(input_ids=input_ids.to(config.device),
                 attention_mask=attention_mask.to(config.device),
                 token_type_ids=token_type_ids.to(config.device),
                 )
print(out.last_hidden_state.shape)
import torch
from torch.utils.data import DataLoader, Dataset
from datasets import load_dataset

class MyDatasets(Dataset):
    def __init__(self, path, split):
        super(MyDatasets, self).__init__()
        self.dataset = load_dataset(path=path, split=split)

    def __getitem__(self, index):
        # The last character of the 'text' field is the label; the rest is the content
        context = self.dataset[index]['text'][:-2]
        label = self.dataset[index]['text'][-1]
        return context, int(label)

    def __len__(self):  # number of samples
        return len(self.dataset)
def pad_collate(batch):  # collate function for the DataLoader
    sents, labels = zip(*batch)
    # Encode the whole batch at once
    data = tokenizer.batch_encode_plus(
        batch_text_or_text_pairs=sents,
        truncation=True,
        padding='max_length',
        max_length=config.max_length,
        return_tensors='pt',
        return_length=True,
    )
    # input_ids: the encoded token ids
    # attention_mask: 0 at padded positions, 1 everywhere else
    input_ids = data['input_ids']
    attention_mask = data['attention_mask']
    token_type_ids = data['token_type_ids']
    labels = torch.LongTensor(labels)
    return input_ids, attention_mask, token_type_ids, labels
train_loader = DataLoader(dataset=MyDatasets(config.train_datasets_path, config.train_split),
                          batch_size=config.train_batch_size,
                          shuffle=True,
                          collate_fn=pad_collate,
                          drop_last=True,
                          num_workers=config.num_workers)
test_loader = DataLoader(dataset=MyDatasets(config.test_datasets_path, config.test_split),
                         batch_size=config.test_batch_size,
                         shuffle=True,
                         collate_fn=pad_collate,
                         drop_last=True,
                         num_workers=config.num_workers)
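A quick sanity check, pulling a single batch through the pipeline:

input_ids, attention_mask, token_type_ids, labels = next(iter(train_loader))
print(input_ids.shape, attention_mask.shape, token_type_ids.shape, labels.shape)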
Next you can design a model on top of these frozen BERT features and train your own text classifier!
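As a rough starting point, here is a minimal, hypothetical downstream model: a single linear layer on top of the [CLS] vector of the frozen BERT, with a plain training loop over the loaders above. It assumes a binary label set and the config defined earlier; it is a sketch, not the tutorial's own model.

import torch

class Classifier(torch.nn.Module):
    def __init__(self, pretrained, num_classes=2):
        super().__init__()
        self.pretrained = pretrained
        # bert-base-chinese has a hidden size of 768
        self.fc = torch.nn.Linear(768, num_classes)

    def forward(self, input_ids, attention_mask, token_type_ids):
        # BERT stays frozen; only the linear head is trained
        with torch.no_grad():
            out = self.pretrained(input_ids=input_ids,
                                  attention_mask=attention_mask,
                                  token_type_ids=token_type_ids)
        # Classify from the hidden state of the [CLS] token
        return self.fc(out.last_hidden_state[:, 0])

model = Classifier(pretrained).to(config.device)
optimizer = torch.optim.AdamW(model.fc.parameters(), lr=5e-4)
criterion = torch.nn.CrossEntropyLoss()

model.train()
for input_ids, attention_mask, token_type_ids, labels in train_loader:
    logits = model(input_ids.to(config.device),
                   attention_mask.to(config.device),
                   token_type_ids.to(config.device))
    loss = criterion(logits, labels.to(config.device))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()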