How should the final loss in a seq2seq model be computed?

# coding=utf-8
import numpy as np
import tensorflow as tf
from tensorflow.contrib.seq2seq import sequence_loss

'''
To pin down exactly how the seq2seq loss should be computed, we use
tensorflow.contrib.seq2seq.sequence_loss as the reference implementation.
Note: I no longer remember the source, but the training loss is sometimes
taken per batch, i.e. batch_loss / batch_size, rather than
batch_loss / num_predicted_words.
'''
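# For reference, the TF 1.x contrib signature exercised below is (roughly):
#   sequence_loss(logits, targets, weights,
#                 average_across_timesteps=True, average_across_batch=True,
#                 softmax_loss_function=None, name=None)
# with logits of shape (batch, time, vocab), targets of shape (batch, time)
# holding int token ids, and weights a (batch, time) float mask.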

# logits, shape (batch_size=2, max_time=3, vocab_size=4)
output_np = np.array(
    [
        [[0.6, 0.5, 0.3, 0.2], [0.9, 0.5, 0.3, 0.2], [1.0, 0.5, 0.3, 0.2]],
        [[0.2, 0.5, 0.3, 0.2], [0.3, 0.5, 0.3, 0.2], [0.4, 0.5, 0.3, 0.2]]
    ]
)
print(output_np.shape)  # (2, 3, 4)
target_np = np.array([[0, 1, 2], [3, 0, 1]], dtype=np.int32)  # target token ids, shape (batch_size, max_time)
target_lengths = np.array([3, 2], dtype=np.int32)  # valid (unpadded) length of each target sequence

output = tf.convert_to_tensor(output_np, np.float32)
target = tf.convert_to_tensor(target_np, np.int32)
weights = tf.convert_to_tensor(target_lengths, np.float32)  # NB: despite the name, these are sequence lengths, not per-token weights

max_target_sequence_length = tf.reduce_max(weights, name='max_target_len')
# masks: 1.0 for real tokens, 0.0 for padding -> [[1. 1. 1.], [1. 1. 0.]], shape (2, 3)
masks = tf.sequence_mask(weights, max_target_sequence_length, dtype=tf.float32, name='masks')

cost1 = sequence_loss(output, target, weights=masks)  # 1.429252: masked loss averaged over the valid tokens

crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
cost2 = tf.reduce_mean(crossent * masks)  # 1.1910433 -- taking the mean directly implicitly assumes the mask is all ones, which is wrong.
cost4 = tf.reduce_sum(crossent * masks) / 6  # cost4 == cost2 == 1.1910433: reduce_mean divides by all 6 positions, padding included

# In total only sum(masks) tokens were actually predicted, so divide by that instead:
cost3 = tf.reduce_sum(crossent * masks) / tf.reduce_sum(masks)  # 1.429252, matches cost1 (sequence_loss)
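
# The docstring's note mentions a third convention: divide by the batch size
# instead of the number of predicted tokens. A minimal sketch of that variant
# (batch_size = 2.0 is an assumption matching this toy example); sequence_loss
# also exposes related behaviour via its average_across_timesteps /
# average_across_batch flags:
batch_size = 2.0
cost5 = tf.reduce_sum(crossent * masks) / batch_size  # batch_loss / batch_size
# Per-example loss, each sequence averaged over its own valid steps, shape (2,):
per_example_loss = sequence_loss(output, target, weights=masks,
                                 average_across_timesteps=True,
                                 average_across_batch=False)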

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    masks_r = sess.run(masks)
    print(masks_r)  # shape (2, 3)

    cost1_r = sess.run(cost1)
    print(cost1_r)

    crossent_r = sess.run(crossent)
    print(crossent_r)  # shape (2, 3)
    cost2_r = sess.run(cost2)
    print(cost2_r)

    cost4_r = sess.run(cost4)
    print(cost4_r)

    cost3_r = sess.run(cost3)
    print(cost3_r)
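
# A pure-numpy cross-check of the same computation (a sketch; no TF needed):
# softmax the logits, take -log of the probability at each target id, mask out
# the padding positions, and divide by the number of valid tokens. The result
# should match cost1 / cost3 above.
probs = np.exp(output_np) / np.exp(output_np).sum(axis=-1, keepdims=True)
nll = -np.log(probs[np.arange(2)[:, None], np.arange(3)[None, :], target_np])
mask_np = (np.arange(3)[None, :] < target_lengths[:, None]).astype(np.float64)
print((nll * mask_np).sum() / mask_np.sum())  # ~1.429252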
