TensorFlow Custom Loss Function

The script below fits a single linear unit y = matmul(x, w1) to data generated from y = x1 + x2 plus small noise, using an asymmetric custom loss: under-predicting is penalized ten times more heavily than over-predicting, implemented with tf.greater and tf.where.

import tensorflow as tf
from numpy.random import RandomState
batch_size = 8
# Two input nodes
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))

# Forward pass: a single linear layer, y = x * w1
y = tf.matmul(x, w1)
# Custom loss: if y > y_ (over-prediction), cost is a*(y - y_); otherwise (under-prediction), cost is b*(y_ - y)
# Here a = 1 and b = 10, so under-prediction is penalized ten times more heavily
loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * 1, (y_ - y) * 10))
# Backpropagation / training step
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
rdm = RandomState(1)
X = rdm.rand(128, 2)
# Labels: y = x1 + x2 plus uniform noise in [-0.05, 0.05)
Y = [[x1 + x2 + rdm.rand() / 10 - 0.05] for (x1, x2) in X]
# Start training
sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)
steps = 5000
for i in range(steps):
    # Select the next batch, cycling through the 128 training samples
    start = (i * batch_size) % 128
    end = min(128, start + batch_size)
    sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
    if i % 1000 == 0:
        print(sess.run(w1))
sess.close()
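
For comparison, here is a minimal sketch of the same asymmetric loss written for the TensorFlow 2 eager API (this assumes TF 2.x is installed; asymmetric_loss, cost_over, and cost_under are illustrative names, not part of the original script):

import tensorflow as tf

def asymmetric_loss(y_true, y_pred, cost_over=1.0, cost_under=10.0):
    # Over-prediction (y_pred > y_true) costs cost_over per unit of error,
    # under-prediction costs cost_under per unit of error
    diff = y_pred - y_true
    return tf.reduce_sum(tf.where(diff > 0, cost_over * diff, -cost_under * diff))

# Quick check: over-predicting by 1 should cost 1, under-predicting by 1 should cost 10
print(asymmetric_loss(tf.constant([[2.0]]), tf.constant([[3.0]])).numpy())  # 1.0
print(asymmetric_loss(tf.constant([[3.0]]), tf.constant([[2.0]])).numpy())  # 10.0

Such a function can also be passed as the loss argument of tf.keras.Model.compile, since Keras accepts any callable with the (y_true, y_pred) signature.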
