list(map(tokenizer.tokenize, text.split(' ')))

This idiom matters: tokenizing the text word by word lets you align the subword tokenization with the raw text, compare the two, and recover the original spans.

text2tokens = tokenizer.tokenize(text, add_special_tokens=True)
text_ = text.split(' ')
subwords = list(map(tokenizer.tokenize, text_))
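
For instance (a minimal sketch assuming a WordPiece tokenizer such as bert-base-uncased; the exact subword splits depend on the vocabulary):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
text = "unaffable weather today"
subwords = list(map(tokenizer.tokenize, text.split(' ')))
# e.g. [['una', '##ffa', '##ble'], ['weather'], ['today']]
# len(subwords[i]) is how many tokens word i expands to, which is exactly
# what the cumulative sums in get_ent2token_spans below rely on.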

class Preprocessor(object):
    def __init__(self, tokenizer):
        super(Preprocessor, self).__init__()
        self.tokenizer = tokenizer
        self.add_special_tokens = True

    def get_ent2token_spans(self, text, entity_list):
        """Convert an entity list into token-level spans.

        Args:
            text (str): the raw text
            entity_list (list): [(start, end, ent_type), (start, end, ent_type), ...]
                where start/end are word-level indices into text.split(' ')
        """
        ent2token_spans = []

        # offset_mapping requires a fast tokenizer; it maps each token to its
        # (start, end) character span in the raw text.
        inputs = self.tokenizer(text, add_special_tokens=True, return_offsets_mapping=True)
        token2char_span_mapping = inputs["offset_mapping"]
        text2tokens = self.tokenizer.tokenize(text, add_special_tokens=self.add_special_tokens)
        # Tokenize word by word so len(subwords[i]) gives the subword count of word i.
        text_ = text.split(' ')
        subwords = list(map(self.tokenizer.tokenize, text_))
        # get_index is an external helper not shown in this post; its outputs are
        # unused in the span conversion below.
        # toks, index = get_index(text2tokens)
        for en_span in entity_list:
            # Prefix sums over the subword lengths map a word index to token
            # positions (before special tokens are accounted for):
            # subh = number of subwords before the start word,
            # subt = number of subwords up to and including the start word,
            # so the start word occupies token positions [subh, subt).
            # (The original if/else on en_span[0] != 0 computed the same thing
            # in both branches, so it collapses to an unconditional sum.)
            subh = sum(len(s) for s in subwords[:en_span[0]])
            subt = sum(len(s) for s in subwords[:en_span[0] + 1])
            # Same computation for the end word of the entity.
            objh = sum(len(s) for s in subwords[:en_span[1]])
            objt = sum(len(s) for s in subwords[:en_span[1] + 1])

            # +1 shifts past the special token prepended by
            # add_special_tokens=True (e.g. [CLS] for BERT).
            start_index = (subh + 1, subt + 1)
            end_index = (objh + 1, objt + 1)

            token_span = (start_index, end_index, en_span[2])
            ent2token_spans.append(token_span)

        return ent2token_spans
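
A usage sketch (the text and entity tuples here are made up for illustration; start/end are word-level indices, and a standard BERT fast tokenizer is assumed):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
preprocessor = Preprocessor(tokenizer)

text = "Barack Obama visited Paris"
entity_list = [(0, 1, "PER"), (3, 3, "LOC")]  # (start_word, end_word, type)
print(preprocessor.get_ent2token_spans(text, entity_list))
# Each entity comes back as ((subh+1, subt+1), (objh+1, objt+1), type),
# i.e. token positions shifted by one for the leading [CLS].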

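For reference, the offset_mapping that the method already requests gives a direct token-to-character alignment on its own (fast tokenizers only; special tokens map to (0, 0)), which is another way to compare the subword tokenization against the raw text and recover spans:

raw = "New York is big"
inputs = tokenizer(raw, add_special_tokens=True, return_offsets_mapping=True)
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"])
for tok, (start, end) in zip(tokens, inputs["offset_mapping"]):
    # each token maps back to raw[start:end]; [CLS]/[SEP] map to (0, 0)
    print(tok, (start, end), repr(raw[start:end]))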