Python fuel.transformers.Unpack 方法代码示例

本文整理汇总了Python中fuel.transformers.Unpack方法的典型用法代码示例。如果您正苦于以下问题:Python transformers.Unpack方法的具体用法?Python transformers.Unpack怎么用?Python transformers.Unpack使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块fuel.transformers的用法示例。

在下文中一共展示了transformers.Unpack方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: setup_datastream

​点赞 3

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def setup_datastream(path, vocab_file, config):
    """Build the QA data stream: length-sorted, batched, and padded.

    Returns the dataset together with the fully wrapped stream.
    """
    dataset = QADataset(path, vocab_file, config.n_entities,
                        need_sep_token=config.concat_ctx_and_question)
    scheme = QAIterator(path, shuffle=config.shuffle_questions)

    pipeline = DataStream(dataset, iteration_scheme=scheme)
    if config.concat_ctx_and_question:
        pipeline = ConcatCtxAndQuestion(pipeline, config.concat_question_before,
                                        dataset.reverse_vocab[''])

    # Read several batches ahead and sort that window so each final batch
    # contains examples of similar length (less padding waste).
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(
                         config.batch_size * config.sort_batch_count))
    sort_source = 'question' if config.concat_ctx_and_question else 'context'
    key_fn = _balanced_batch_helper(pipeline.sources.index(sort_source))
    pipeline = Mapping(pipeline, SortMapping(key_fn))
    pipeline = Unpack(pipeline)
    pipeline = Batch(pipeline, iteration_scheme=ConstantScheme(config.batch_size))
    pipeline = Padding(pipeline,
                       mask_sources=['context', 'question', 'candidates'],
                       mask_dtype='int32')

    return dataset, pipeline

开发者ID:thomasmesnard,项目名称:DeepMind-Teaching-Machines-to-Read-and-Comprehend,代码行数:21,

示例2: setup_cnnsquad_datastream

​点赞 3

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def setup_cnnsquad_datastream(sq_path, cnn_path, vocab_file, config):
    """Build the mixed CNN + SQuAD stream: length-sorted, batched, padded."""
    dataset = CNNSQDataset(sq_path, cnn_path, vocab_file)
    scheme = CNNSQIterator(sq_path, cnn_path, cnn_ratio=config.add_cnn_data)
    pipeline = DataStream(dataset, iteration_scheme=scheme)

    # Group sort_batch_count batches together, sort by context length, then
    # re-batch, so every batch holds similarly sized examples.
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(
                         config.batch_size * config.sort_batch_count))
    key_fn = _balanced_batch_helper(pipeline.sources.index('context'))
    pipeline = Mapping(pipeline, SortMapping(key_fn))
    pipeline = Unpack(pipeline)
    pipeline = Batch(pipeline, iteration_scheme=ConstantScheme(config.batch_size))
    pipeline = Padding(pipeline,
                       mask_sources=['context', 'question', 'answer'],
                       mask_dtype='int32')

    return dataset, pipeline

开发者ID:arianhosseini,项目名称:Question-Answering,代码行数:18,

示例3: setup_squad_datastream

​点赞 3

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def setup_squad_datastream(path, vocab_file, config):
    """Build the SQuAD stream: length-sorted, batched, and padded."""
    dataset = SQuADDataset(path, vocab_file)
    pipeline = DataStream(dataset, iteration_scheme=SQuADIterator(path))

    if config.concat_ctx_and_question:
        pipeline = ConcatCtxAndQuestion(pipeline, config.concat_question_before,
                                        dataset.reverse_vocab[''])

    # Sort a window of several batches by context length so the final
    # fixed-size batches contain examples of similar length.
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(
                         config.batch_size * config.sort_batch_count))
    key_fn = _balanced_batch_helper(pipeline.sources.index('context'))
    pipeline = Mapping(pipeline, SortMapping(key_fn))
    pipeline = Unpack(pipeline)
    pipeline = Batch(pipeline, iteration_scheme=ConstantScheme(config.batch_size))
    pipeline = Padding(pipeline,
                       mask_sources=['context', 'question', 'answer',
                                     'ans_indices', 'ans_boundaries'],
                       mask_dtype='int32')

    return dataset, pipeline

#train examples count 1836975

#dev examples count 221697

开发者ID:arianhosseini,项目名称:Question-Answering,代码行数:24,

示例4: setup_squad_ranker_datastream

​点赞 3

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def setup_squad_ranker_datastream(path, vocab_file, config, example_count=1836975):
    """Build the SQuAD ranker stream: shuffled, length-sorted, batched, padded.

    example_count defaults to the training-set size; pass the dev count
    when streaming the dev split.
    """
    dataset = SQuADRankerDataset(path, vocab_file)
    pipeline = DataStream(dataset,
                          iteration_scheme=ShuffledExampleScheme(examples=example_count))

    # Sort a read-ahead window by question length, then re-batch, so each
    # batch holds similarly sized examples.
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(
                         config.batch_size * config.sort_batch_count))
    key_fn = _balanced_batch_helper(pipeline.sources.index('question'))
    pipeline = Mapping(pipeline, SortMapping(key_fn))
    pipeline = Unpack(pipeline)
    pipeline = Batch(pipeline, iteration_scheme=ConstantScheme(config.batch_size))
    pipeline = Padding(pipeline,
                       mask_sources=['question', 'answer', 'better', 'worse',
                                     'b_left', 'b_right', 'w_left', 'w_right'],
                       mask_dtype='int32')

    return dataset, pipeline

开发者ID:arianhosseini,项目名称:Question-Answering,代码行数:18,

示例5: setup_datastream

​点赞 3

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def setup_datastream(path, batch_size, sort_batch_count, valid=False):
    """Load pre-saved numpy arrays from *path* and build a padded stream.

    When ``valid`` is true the validation split files are loaded instead
    of the training ones.
    """
    split = 'valid' if valid else 'train'
    raw_x = numpy.load(os.path.join(path, split + '_x_raw.npy'))
    phn = numpy.load(os.path.join(path, split + '_phn.npy'))
    seq_to_phn = numpy.load(os.path.join(path, split + '_seq_to_phn.npy'))

    # Each row of seq_to_phn gives [start, end) into phn; column 2 of phn
    # holds the target symbols, yielding one label sequence per utterance.
    labels = [phn[bounds[0]:bounds[1], 2] for bounds in seq_to_phn]

    dataset = IndexableDataset({'input': raw_x, 'output': labels})
    pipeline = DataStream(dataset,
                          iteration_scheme=ShuffledExampleScheme(len(raw_x)))

    # Sort a window of several batches by input length before re-batching.
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(batch_size * sort_batch_count))
    key_fn = _balanced_batch_helper(pipeline.sources.index('input'))
    pipeline = Mapping(pipeline, SortMapping(key_fn))
    pipeline = Unpack(pipeline)
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(batch_size,
                                                     num_examples=len(raw_x)))
    pipeline = Padding(pipeline, mask_sources=['input', 'output'])

    return dataset, pipeline

开发者ID:thomasmesnard,项目名称:CTC-LSTM,代码行数:21,

示例6: _get_sgnmt_tr_stream

​点赞 2

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def _get_sgnmt_tr_stream(data_stream,
                         src_vocab_size=30000,
                         trg_vocab_size=30000,
                         seq_len=50,
                         batch_size=80,
                         sort_k_batches=12,
                         src_sparse_feat_map='',
                         trg_sparse_feat_map='',
                         **kwargs):
    """Wrap the raw text stream ``data_stream`` for the Blocks main loop.

    Filters over-long sequences, sorts locally by length, splits into
    batches, and masks. UNK replacement is already handled in the
    ``DataSet``s. This roughly corresponds to ``get_sgnmt_tr_stream`` in
    ``machine_translation/stream`` from the blocks examples; the
    arguments are supplied by the configuration dict.
    """
    # Drop sequences longer than seq_len.
    filtered = Filter(data_stream, predicate=stream._too_long(seq_len=seq_len))

    # Read sort_k_batches batches ahead so sorting can operate on a
    # larger window than a single batch.
    window = Batch(filtered,
                   iteration_scheme=ConstantScheme(batch_size * sort_k_batches))

    # Sort every read-ahead window by sequence length.
    window = Mapping(window, SortMapping(stream._length))

    # Flatten back to an example-at-a-time stream.
    examples = Unpack(window)

    # Re-batch at the requested batch size.
    batched = Batch(examples, iteration_scheme=ConstantScheme(batch_size))

    # Pad short sequences and attach EOS-terminated masks.
    return stream.PaddingWithEOS(batched, [utils.EOS_ID, utils.EOS_ID])

开发者ID:ucam-smt,项目名称:sgnmt,代码行数:42,

示例7: setup_toy_datastream

​点赞 2

# 需要导入模块: from fuel import transformers [as 别名]

# 或者: from fuel.transformers import Unpack [as 别名]

def setup_toy_datastream(config):
    """Build the toy dataset stream: length-sorted, batched, and padded."""
    dataset = ToyDataset()
    pipeline = DataStream(dataset, iteration_scheme=ToyIterator())

    # Sort a window of sort_batch_count batches by context length so the
    # final batches contain similarly sized examples.
    pipeline = Batch(pipeline,
                     iteration_scheme=ConstantScheme(
                         config.batch_size * config.sort_batch_count))
    key_fn = _balanced_batch_helper(pipeline.sources.index('context'))
    pipeline = Mapping(pipeline, SortMapping(key_fn))
    pipeline = Unpack(pipeline)
    pipeline = Batch(pipeline, iteration_scheme=ConstantScheme(config.batch_size))
    pipeline = Padding(pipeline,
                       mask_sources=['context', 'question', 'answer', 'ans_indices'],
                       mask_dtype='int32')

    return dataset, pipeline

开发者ID:arianhosseini,项目名称:Question-Answering,代码行数:17,

注:本文中的fuel.transformers.Unpack方法示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。

你可能感兴趣的:(python,unpack原理)