BERT on SST-2

Dataset: SST-2
Paper: https://arxiv.org/abs/1810.04805
GitHub (PyTorch): https://github.com/huggingface/pytorch-transformers
GitHub (TensorFlow): https://github.com/google-research/bert

Step 1: Download the model

Download a pretrained TensorFlow model from https://github.com/google-research/bert#pre-trained-models
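
For reference, a minimal Python sketch of downloading and unpacking the BERT-Base (uncased) checkpoint; the URL is the one listed on the page above, and the extraction target directory is an arbitrary choice:

import urllib.request
import zipfile

# BERT-Base, Uncased from the Google Research release page
url = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip'
urllib.request.urlretrieve(url, 'uncased_L-12_H-768_A-12.zip')

# The archive contains bert_model.ckpt.*, bert_config.json and vocab.txt
with zipfile.ZipFile('uncased_L-12_H-768_A-12.zip') as zf:
    zf.extractall('.')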

Step 2: Convert the model

Convert the TensorFlow checkpoint to a PyTorch model:

python3 convert_tf_checkpoint_to_pytorch.py \
  --tf_checkpoint_path $BERT_BASE_DIR/bert_model.ckpt \
  --bert_config_file $BERT_BASE_DIR/bert_config.json \
  --pytorch_dump_path $BERT_BASE_DIR/pytorch_model.bin

Step 3: Code walkthrough

1. DataProcessor

# Base class for reading the data files
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets. """

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for the train set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab-separated value file."""
        with open(input_file, 'r', encoding='utf-8-sig') as f:
            reader = csv.reader(f, delimiter='\t', quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)

            return lines


class SstProcess(DataProcessor):
    """Processor for the SST-2 dataset."""
    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev'
        )

    def get_labels(self):
        """Gets the list of labels for the train set."""
        """SST-2"""
        return ['0','1']


    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, i)
            text_a = line[0]
            label = line[1]
            examples.append(InputExample(guid=guid, text_a=text_a, label=label))

        return examples
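
A quick sketch of how this processor would be used; `glue_data/SST-2` is just an assumed location of the SST-2 files, and `InputExample` is the small container class from the library's GLUE example utilities:

processor = SstProcess()
label_list = processor.get_labels()                          # ['0', '1']
train_examples = processor.get_train_examples('glue_data/SST-2')
dev_examples = processor.get_dev_examples('glue_data/SST-2')

# Each InputExample carries a guid, the sentence text and its label
first = train_examples[0]
print(first.guid, first.label, first.text_a)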

2. convert_examples_to_features

def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode,
                                 cls_token_at_end=False, pad_on_left=False,
                                 cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                                 sequence_a_segment_id=0, sequence_b_segment_id=1,
                                 cls_token_segment_id=1, pad_token_segment_id=0,
                                 mask_padding_with_zero=True):

    """Loads a data file into a list of `InputBatch`s.
        Args:
            examples: InputExample, 表示样本集
            label_list: 标签列表
            max_seq_length: 句子最大长度
            tokenizer: 分词器
        Returns:
            features: InputFeatures, 表示样本转化后信息
        """
    label_map = {label:i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            # Only [CLS] and [SEP] are added here (single sentence, no tokens_b), hence "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # [CLS] can be viewed as holding an aggregate representation of the whole sentence
        # [SEP] separates sentences so the model can tell where each one ends

        tokens = tokens_a + [sep_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if tokens_b:
            tokens += tokens_b + [sep_token]
            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
        # Whether [CLS] goes at the start or the end of the sequence: BERT puts it at the start, XLNet at the end
        if cls_token_at_end:
            tokens = tokens + [cls_token]
            segment_ids = segment_ids + [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        # Whether padding goes on the left or the right: BERT pads on the right, XLNet on the left
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features

def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    # """截断句子a和句子b,使得二者之和不超过 max_length"""
    # 此处可以改进 25% 75% 效果更好

    """Truncates a sequence pair in place to the maximum length."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
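
Putting the pieces together, a sketch of turning the features into tensors for a DataLoader, mirroring the pattern in the library's run_glue example (train_examples and label_list come from the processor above, tokenizer is the BertTokenizer shown in the next section, and batch_size=32 is an arbitrary value):

import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler

features = convert_examples_to_features(
    train_examples, label_list, max_seq_length=128,
    tokenizer=tokenizer, output_mode='classification')

all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)

train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
train_dataloader = DataLoader(train_data, sampler=RandomSampler(train_data), batch_size=32)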

3. The BERT family

BertConfig

config = BertConfig.from_pretrained(bert_config_path, num_labels=2, finetuning_task='sst-2')

BertTokenizer

tokenizer = BertTokenizer.from_pretrained(bert_model_path, do_lower_case=True)  # set True for uncased models, False for cased models
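
A small illustrative example of what the tokenizer produces (the sentence is made up; rare words are split into WordPiece sub-tokens prefixed with '##'):

tokens = tokenizer.tokenize('this movie was absolutely wonderful')
input_ids = tokenizer.convert_tokens_to_ids(tokens)   # list of vocabulary indices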

BertForSequenceClassification

model = BertForSequenceClassification.from_pretrained(bert_model_path, config=config)
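
For evaluation, prediction is a plain forward pass without labels; a minimal sketch, assuming eval_dataloader is built from the dev examples in the same way as train_dataloader above:

import torch

model.eval()
preds = []
with torch.no_grad():
    for batch in eval_dataloader:
        input_ids, input_mask, segment_ids, label_ids = batch
        # Without labels the model returns a tuple whose first element is the logits
        logits = model(input_ids,
                       attention_mask=input_mask,
                       token_type_ids=segment_ids)[0]
        preds.append(logits.argmax(dim=-1))   # predicted class per example: 0 or 1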

AdamW & WarmupLinearSchedule

# Exclude bias and LayerNorm weights from weight decay
no_decay = ['bias', 'LayerNorm.weight']
param_optimizer = list(model.named_parameters())   # not shown in the original snippet
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0}
]

optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=args.adam_epsilon)

scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=t_total)

Pay attention to how t_total is computed here:

t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

gradient_accumulation_steps is the number of batches over which gradients are accumulated, typically 3-8: the gradients are only zeroed after that many forward/backward passes, which effectively multiplies the batch size by that factor and saves GPU memory.
num_train_epochs is the total number of training epochs.
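
A sketch of the corresponding training loop, showing how gradient_accumulation_steps interacts with optimizer.step() and the scheduler; the gradient clipping value of 1.0 follows the library's example scripts, and the batch layout is the TensorDataset order used earlier:

import torch

global_step = 0
model.train()
for _ in range(int(num_train_epochs)):
    for step, batch in enumerate(train_dataloader):
        input_ids, input_mask, segment_ids, label_ids = batch
        outputs = model(input_ids,
                        attention_mask=input_mask,
                        token_type_ids=segment_ids,
                        labels=label_ids)
        loss = outputs[0]

        # Average the loss over the accumulation window so the accumulated
        # gradient matches that of a single large batch
        loss = loss / gradient_accumulation_steps
        loss.backward()

        if (step + 1) % gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()          # advance the warmup/decay schedule
            optimizer.zero_grad()
            global_step += 1          # counts optimizer steps, i.e. up to t_total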
