import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
# 2.0.0-beta0
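# Verifying how Keras cross-entropy losses relate to manual computations:
# categorical CE with/without from_logits, then binary CE with/without from_logits.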
def cross_entropy(x, y):
    # Cross entropy from probabilities: -sum_i y_i * log(x_i), per example.
    return tf.reduce_sum(-y * tf.math.log(x), axis=-1)
def softmax_cross_entropy_with_logits(x, y):
    # Numerically stabler variant that takes raw logits and applies log-softmax.
    return tf.reduce_sum(-y * tf.math.log_softmax(x, axis=-1), axis=-1)
target = tf.convert_to_tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])  # one-hot labels
logit = tf.convert_to_tensor([[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])  # raw scores; rows need not sum to 1
cce = keras.losses.CategoricalCrossentropy(from_logits=True)
print(cce(target, logit))
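# from_logits=True: Keras applies softmax to the inputs internally, so this
# should match the manual softmax + cross_entropy just below (0.6981444).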
prob = tf.math.softmax(logit, axis=-1)
print(tf.reduce_mean(cross_entropy(prob, target)))
# 0.6981444
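# Cross-check (added sanity test): the helper defined above and the built-in
# tf.nn.softmax_cross_entropy_with_logits compute the same quantity,
# so both should also print 0.6981444.
print(tf.reduce_mean(softmax_cross_entropy_with_logits(logit, target)))
print(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=logit)))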
cce2 = keras.losses.CategoricalCrossentropy()
print(cce2(target, logit))
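# Without from_logits, Keras treats the inputs as probabilities and simply
# rescales each row to sum to 1 (it does NOT apply softmax), hence the
# manual normalization below.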
prob = logit/tf.reduce_sum(logit, axis=-1, keepdims=True)
"""
prob= [[0.9 , 0.05 , 0.05 ],
[0.2512563 , 0.44723618, 0.30150756],
[0.05 , 0.01 , 0.94 ]],
"""
print(tf.reduce_mean(cross_entropy(prob, target)))
# 0.32396814
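# BinaryCrossentropy scores every element independently as a Bernoulli
# probability: -[y*log(p) + (1-y)*log(1-p)], averaged over all 9 entries.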
bce = keras.losses.BinaryCrossentropy()
print(bce(target, logit))
bce2 = keras.losses.binary_crossentropy
print(tf.math.reduce_mean(bce2(target, logit)))
# 0.22857076
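# Manual check: take -log(p) wherever the label is 1 and -log(1-p) wherever
# it is 0, then average the 9 terms.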
pos = -tf.math.log(tf.boolean_mask(logit, tf.equal(target, 1.0)))
neg = -tf.math.log(tf.boolean_mask(1-logit, tf.equal(target, 0.0)))
print(tf.reduce_mean(tf.concat([pos,neg], axis=0)))
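# Equivalent view: stack p and 1-p into a two-class distribution, then binary
# cross-entropy is just categorical cross-entropy over those two classes.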
prob2 = tf.stack([logit, 1-logit], axis=-1)
target2 = tf.stack([target, 1-target], axis=-1)
print(cce2(target2, prob2))
# 0.22857088 (differs from 0.22857076 above only by float32 rounding)
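# With from_logits=True, BinaryCrossentropy passes the inputs through a
# sigmoid first.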
bce3 = keras.losses.BinaryCrossentropy(from_logits=True)
print(bce3(target, logit))
# 0.6533409
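# Same value via the two-class stack trick, now using sigmoid probabilities.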
prob3 = tf.stack([tf.math.sigmoid(logit), 1-tf.math.sigmoid(logit)], axis=-1)
target3 = tf.stack([target, 1-target], axis=-1)
print(cce2(target3, prob3))
print(tf.reduce_mean(cross_entropy(prob3, target3)))
# 0.6533408 (both prints above agree with bce3 up to float32 rounding)
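# One more cross-check (added sanity test): tf.nn.sigmoid_cross_entropy_with_logits
# computes the same loss directly from the logits, so it should match bce3.
print(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=logit)))
# expected: ~0.6533409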