The loss functions in cases 1 and 2 below both target the yogurt daily-sales example.
import tensorflow as tf
import numpy as np
### The function being fitted is y = x1 + x2
SEED = 2333
rdm = np.random.RandomState(seed=SEED)  # random numbers in [0, 1), i.e. 1 is excluded
x = rdm.rand(32, 2)  # 32 rows x 2 columns of features
### Noise: [0, 1) / 10 - 0.05 = [-0.05, 0.05); a little noise makes the targets more realistic
y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in x]
x = tf.cast(x, dtype=tf.float32)  # convert the data type
### Randomly initialize the parameter w1 (2 rows, 1 column); it is updated iteratively below
w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))
epochs = 15000
lr = 0.002
for epoch in range(epochs):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1)
        ### tf.reduce_mean(tf.square(y_ - y)) is the mean squared error
        loss_mse = tf.reduce_mean(tf.square(y_ - y))
    grads = tape.gradient(loss_mse, w1)  # partial derivative of the loss w.r.t. w1
    ### update w1: w1 <- w1 - lr * gradient
    w1.assign_sub(lr * grads)
    ## print every 500 steps
    if epoch % 500 == 0:
        print("After %d training steps, w1 is " % (epoch))
        print(w1.numpy(), "\n")
print("Final w1 is:", w1.numpy())
import tensorflow as tf
import numpy as np
SEED = 2333
COST = 1  # cost of one unit
PROFIT = 99  # profit of one unit
### If the true value y_ exceeds the prediction y, the lost amount is profit;
### otherwise it is cost. This asymmetry calls for a custom loss function.
rdm = np.random.RandomState(seed=SEED)  # random numbers in [0, 1), i.e. 1 is excluded
x = rdm.rand(32, 2)
y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in x]
x = tf.cast(x, dtype=tf.float32)
w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))
epochs = 10000
lr = 0.002
for epoch in range(epochs):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1)
        ### custom loss: tf.where returns (y - y_) * COST where y > y_
        ### (over-prediction wastes cost), and (y_ - y) * PROFIT otherwise
        ### (under-prediction forfeits profit)
        loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * COST, (y_ - y) * PROFIT))
    ## partial derivative of the loss w.r.t. w1
    grads = tape.gradient(loss, w1)
    w1.assign_sub(lr * grads)
print("Final w1 is:", w1.numpy())
# Final w1 is: [[1.1146302]
#  [1.1366452]]
## Both weights exceed 1: because PROFIT >> COST, the model learns to predict on the high side.
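To make the asymmetry concrete, here is a small illustration with hypothetical toy values (my addition): with a true demand of 2, under-predicting by one unit forfeits PROFIT (99), while over-predicting by one unit only wastes COST (1), which is why the optimizer drifts toward over-prediction.
demo_y_true = tf.constant([2.0])  # hypothetical true demand
for demo_y in (tf.constant([1.0]), tf.constant([3.0])):  # one under-, one over-prediction
    demo_loss = tf.reduce_sum(tf.where(tf.greater(demo_y, demo_y_true),
                                       (demo_y - demo_y_true) * COST,
                                       (demo_y_true - demo_y) * PROFIT))
    print("prediction:", demo_y.numpy(), "-> loss:", demo_loss.numpy())
# prints loss 99.0 for the under-prediction and loss 1.0 for the over-prediction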
import tensorflow as tf
import numpy as np
### Compute the cross-entropy of [1, 0] against [0.6, 0.4] and against [0.8, 0.2];
### the prediction with the smaller cross-entropy is closer to [1, 0]
# tf.losses.categorical_crossentropy computes the cross-entropy
loss_ce1 = tf.losses.categorical_crossentropy([1, 0], [0.6, 0.4])
loss_ce2 = tf.losses.categorical_crossentropy([1, 0], [0.8, 0.2])

print("loss_ce1:", loss_ce1)
print("loss_ce2:", loss_ce2)
### Combining softmax with cross-entropy: softmax first maps the raw logits to a
### probability distribution, then the cross-entropy is taken against the labels.
### tf.nn.softmax_cross_entropy_with_logits(y_, y) fuses both steps in one call.
y_ = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
y = np.array([[12, 3, 2], [3, 10, 1], [1, 2, 5], [4, 6.5, 1.2]])
loss_ce = tf.nn.softmax_cross_entropy_with_logits(y_, y)
print(loss_ce)
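A minimal equivalence check (my addition): applying tf.nn.softmax first and then tf.losses.categorical_crossentropy should reproduce the fused result above, up to floating-point error. The names y_pro and loss_ce_two_step are illustrative.
y_pro = tf.nn.softmax(y)  # turn the logits into a probability distribution
loss_ce_two_step = tf.losses.categorical_crossentropy(y_, y_pro)  # should match loss_ce
print(loss_ce_two_step)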