Fine-tuning BERT with Transformers: a text classification example (detailed walkthrough)

Official reference: https://huggingface.co/docs/transformers/training#additional-resources
Text classification example walkthrough: https://www.freesion.com/article/31511099215/

#Transformers BERT fine-tuning example: text classification on the IMDB dataset (binary classification)
#Before running the steps below, install transformers and pytorch

#Load the data. The dataset is a dictionary with three keys: "train", "test" and "unsupervised". We use "train" for training and "test" for evaluation.
from datasets import load_dataset

raw_datasets = load_dataset("imdb")
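
#Quick sanity check (optional): inspect the splits and one raw example
print(raw_datasets)              # DatasetDict with "train", "test" and "unsupervised" splits
print(raw_datasets["train"][0])  # a dict with a "text" review and a "label" (0 = negative, 1 = positive)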

#Tokenizer (using the BERT base model)
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

#Tokenization: tokenize a few sentences (optional; truncation caps inputs at the model's maximum length, 512 for BERT)
sentences = ["I loved this movie!", "The plot made no sense."]  # toy sentences for illustration
inputs = tokenizer(sentences, padding="max_length", truncation=True)
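
#The tokenizer output contains the tensors BERT expects
print(inputs.keys())  # input_ids, token_type_ids, attention_mask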

#Tokenization: process the whole dataset in batches
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)

tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
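
#map keeps the original columns and adds the tokenizer outputs
print(tokenized_datasets["train"].column_names)
#['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask']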

#Take a small subset of the dataset for training (optional; mainly for speed)
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) 
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000)) 
full_train_dataset = tokenized_datasets["train"]
full_eval_dataset = tokenized_datasets["test"]

#There are three ways to fine-tune: the PyTorch Trainer API, the Keras API, and native PyTorch

'''Fine-tuning in PyTorch with the Trainer API'''
#Define the model (the sequence classification head is randomly initialized)
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

#Instantiate a TrainingArguments. This class contains all the hyperparameters we can tune for the Trainer, plus flags that activate the different training options it supports.
from transformers import TrainingArguments

training_args = TrainingArguments("test_trainer")
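
#A fuller TrainingArguments sketch with commonly tuned hyperparameters
#(the values below are illustrative and close to the defaults, not tuned for IMDB)
training_args = TrainingArguments(
    "test_trainer",
    num_train_epochs=3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    learning_rate=5e-5,
    weight_decay=0.01,
)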

#Instantiate a Trainer
from transformers import Trainer

trainer = Trainer(
    model=model, args=training_args, train_dataset=small_train_dataset, eval_dataset=small_eval_dataset
)
#Fine-tune (by default there is no evaluation during training)
trainer.train()
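
#(Optional) persist the fine-tuned model and tokenizer; the path below is just an example
trainer.save_model("test_trainer/final")
tokenizer.save_pretrained("test_trainer/final")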

#Metrics computed during evaluation (note: in newer versions of datasets, load_metric has moved to the separate evaluate library)
import numpy as np
from datasets import load_metric

metric = load_metric("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
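
#Quick sanity check with toy logits: argmax picks classes [1, 0], which match the labels
print(compute_metrics((np.array([[0.1, 0.9], [2.0, 0.5]]), np.array([1, 0]))))  # {'accuracy': 1.0}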

#Evaluate
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics,
)
trainer.evaluate()

#Report evaluation metrics during training (optional): evaluate at the end of every epoch
from transformers import TrainingArguments

training_args = TrainingArguments("test_trainer", evaluation_strategy="epoch")
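
#Re-create the Trainer so the new arguments take effect; accuracy is now reported at the end of every epoch
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=small_train_dataset,
    eval_dataset=small_eval_dataset,
    compute_metrics=compute_metrics,
)
trainer.train()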

'''Fine-tuning with Keras'''
#Train natively in TensorFlow with the Keras API. First, define the model
import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification

model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

#Convert the datasets to TensorFlow format and drop the raw text column
tf_train_dataset = small_train_dataset.remove_columns(["text"]).with_format("tensorflow")
tf_eval_dataset = small_eval_dataset.remove_columns(["text"]).with_format("tensorflow")

#Gather everything into big tensors and build datasets with tf.data.Dataset.from_tensor_slices
train_features = {x: tf_train_dataset[x] for x in tokenizer.model_input_names}
train_tf_dataset = tf.data.Dataset.from_tensor_slices((train_features, tf_train_dataset["label"]))
train_tf_dataset = train_tf_dataset.shuffle(len(tf_train_dataset)).batch(8)

eval_features = {x: tf_eval_dataset[x] for x in tokenizer.model_input_names}
eval_tf_dataset = tf.data.Dataset.from_tensor_slices((eval_features, tf_eval_dataset["label"]))
eval_tf_dataset = eval_tf_dataset.batch(8)
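
#Sanity check: each element is a (features, label) pair; with max_length padding the
#input tensors are 512 tokens wide and each batch holds 8 examples
print(train_tf_dataset.element_spec)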

#Compile and train
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.metrics.SparseCategoricalAccuracy()],
)

model.fit(train_tf_dataset, validation_data=eval_tf_dataset, epochs=3)

#Save the model and reload it as a PyTorch model (optional)
from transformers import AutoModelForSequenceClassification

model.save_pretrained("my_imdb_model")
pytorch_model = AutoModelForSequenceClassification.from_pretrained("my_imdb_model", from_tf=True)
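
#(Optional) quick check that the reloaded PyTorch model runs; the review text is made up
import torch

enc = tokenizer("A wonderful, heartfelt film.", return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = pytorch_model(**enc).logits
print(logits.argmax(dim=-1))  # 0 = negative, 1 = positive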

'''Fine-tuning in native PyTorch'''
#The steps above hold on to a fair amount of memory; free it first (optional)
import torch

del model
del pytorch_model
del trainer
torch.cuda.empty_cache()

#Define the dataloaders we will use to iterate over batches. Before doing so, tokenized_datasets needs some post-processing:

#Remove the columns the model does not expect (here, the "text" column)
#Rename the "label" column to "labels" (the model expects this argument to be named labels)
#Set the dataset format so it returns PyTorch tensors instead of lists
tokenized_datasets = tokenized_datasets.remove_columns(["text"])
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
tokenized_datasets.set_format("torch")

small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))

#Define the dataloaders
from torch.utils.data import DataLoader

train_dataloader = DataLoader(small_train_dataset, shuffle=True, batch_size=8)
eval_dataloader = DataLoader(small_eval_dataset, batch_size=8)
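
#Sanity check: a batch is a dict of tensors, each of shape (8, 512) except labels with shape (8,)
batch = next(iter(train_dataloader))
print({k: v.shape for k, v in batch.items()})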

#Define the model
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)

#Optimizer and learning-rate scheduler (transformers.AdamW is deprecated; torch.optim.AdamW is the drop-in replacement)
from torch.optim import AdamW

optimizer = AdamW(model.parameters(), lr=5e-5)

#The learning rate decays linearly from its maximum (5e-5 here) down to 0
from transformers import get_scheduler

num_epochs = 3
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=num_training_steps
)
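
#Sanity check: before any optimizer step the scheduler reports the full learning rate
print(lr_scheduler.get_last_lr())  # [5e-05]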

#Define a device to put the model on (training is much faster if a GPU is available)
import torch

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)

#Start training; a progress bar is added to make progress visible
from tqdm.auto import tqdm

progress_bar = tqdm(range(num_training_steps))

model.train()
for epoch in range(num_epochs):
    for batch in train_dataloader:
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()

        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)

#Evaluation
metric = load_metric("accuracy")
model.eval()
for batch in eval_dataloader:
    batch = {k: v.to(device) for k, v in batch.items()}
    with torch.no_grad():
        outputs = model(**batch)

    logits = outputs.logits
    predictions = torch.argmax(logits, dim=-1)
    metric.add_batch(predictions=predictions, references=batch["labels"])

print(metric.compute())  # a dict holding the accuracy over the eval set
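
#(Optional) try the fine-tuned model on a new review; the sentence below is made up
text = "One of the best films I have seen in years."
enc = tokenizer(text, return_tensors="pt", truncation=True).to(device)
with torch.no_grad():
    pred = model(**enc).logits.argmax(dim=-1).item()
print("positive" if pred == 1 else "negative")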
        
