import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
data = []
label = []
np.random.seed(0)
for i in range(150):
    x1 = np.random.uniform(-1, 1)
    x2 = np.random.uniform(0, 2)
    # Points inside the unit circle get label 0, everything else label 1;
    # Gaussian noise (sigma = 0.1) makes the two classes overlap slightly.
    if x1**2 + x2**2 <= 1:
        data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])
        label.append(0)
    else:
        data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])
        label.append(1)
# Stack into a (150, 2) feature matrix and a (150, 1) label column.
data = np.hstack(data).reshape(-1, 2)
label = np.hstack(label).reshape(-1, 1)
plt.scatter(data[:, 0], data[:, 1], c=label.ravel(),
            cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
plt.show()
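# Aside: the same kind of dataset can be generated without the Python loop.
# A minimal vectorized sketch -- the random draws differ from the loop above,
# and data_v / label_v are hypothetical names, not used later in this script:
x1_v = np.random.uniform(-1, 1, 150)
x2_v = np.random.uniform(0, 2, 150)
label_v = (x1_v**2 + x2_v**2 > 1).astype(np.int64).reshape(-1, 1)
data_v = np.random.normal(np.stack([x1_v, x2_v], axis=1), 0.1)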
def get_weight(shape, lambda1):
    """Create a weight variable and register its L2 penalty in the 'losses' collection."""
    var = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lambda1)(var))
    return var
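# For intuition: in TF 1.x, tf.contrib.layers.l2_regularizer(scale)(w)
# evaluates to scale * sum(w**2) / 2, the classic weight-decay penalty.
# A minimal self-check sketch (hypothetical _demo names, safe to delete):
_w_demo = tf.constant([3.0, 4.0])
_penalty_demo = tf.contrib.layers.l2_regularizer(0.1)(_w_demo)
with tf.Session() as _s:
    # 0.1 * (9 + 16) / 2 = 1.25
    assert abs(_s.run(_penalty_demo) - 1.25) < 1e-6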
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
sample_size = len(data)
# Fully connected architecture: 2 inputs -> 10 -> 5 -> 3 -> 1 output.
layer_dimension = [2, 10, 5, 3, 1]
n_layers = len(layer_dimension)
cur_layer = x
in_dimension = layer_dimension[0]
for i in range(1, n_layers):
    out_dimension = layer_dimension[i]
    # Each weight matrix contributes an L2 penalty with coefficient 0.003.
    weight = get_weight([in_dimension, out_dimension], 0.003)
    bias = tf.Variable(tf.constant(0.1, shape=[out_dimension]))
    cur_layer = tf.nn.elu(tf.matmul(cur_layer, weight) + bias)
    in_dimension = layer_dimension[i]
y = cur_layer
# Data-fit term: mean squared error over the training set.
mse_loss = tf.reduce_sum(tf.pow(y_ - y, 2)) / sample_size
tf.add_to_collection('losses', mse_loss)
# Total loss = MSE plus the L2 penalties registered in get_weight().
loss = tf.add_n(tf.get_collection('losses'))
# First experiment: optimize the raw MSE only, with no regularization.
train_op = tf.train.AdamOptimizer(0.001).minimize(mse_loss)
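# Sanity check: the 'losses' collection now holds one L2 term per weight
# matrix (four of them) plus the MSE term -- five tensors in total.
assert len(tf.get_collection('losses')) == 5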
TRAINING_STEPS = 40000
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict={x: data, y_: label})
        if i % 2000 == 0:
            print("After %d steps, mse_loss: %f" % (
                i, sess.run(mse_loss, feed_dict={x: data, y_: label})))

    # Evaluate the trained network on a dense grid to draw the decision boundary.
    xx, yy = np.mgrid[-1.2:1.2:.01, -0.2:2.2:.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)

plt.scatter(data[:, 0], data[:, 1], c=label.ravel(),
            cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
plt.contour(xx, yy, probs, levels=[.5], cmap="Greys", vmin=0, vmax=.1)
plt.show()
'''
After 0 steps, mse_loss: 3.866614
After 2000 steps, mse_loss: 0.037082
After 4000 steps, mse_loss: 0.029324
After 6000 steps, mse_loss: 0.023534
After 8000 steps, mse_loss: 0.021745
After 10000 steps, mse_loss: 0.020566
After 12000 steps, mse_loss: 0.019632
After 14000 steps, mse_loss: 0.018284
After 16000 steps, mse_loss: 0.016959
After 18000 steps, mse_loss: 0.015033
After 20000 steps, mse_loss: 0.008671
After 22000 steps, mse_loss: 0.006562
After 24000 steps, mse_loss: 0.005542
After 26000 steps, mse_loss: 0.004682
After 28000 steps, mse_loss: 0.003911
After 30000 steps, mse_loss: 0.003389
After 32000 steps, mse_loss: 0.002933
After 34000 steps, mse_loss: 0.002545
After 36000 steps, mse_loss: 0.002171
After 38000 steps, mse_loss: 0.001805
'''
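# Without regularization the training MSE keeps falling (down to 0.0018 by
# step 38000) and the fitted boundary tends to curl around individual noisy
# points -- classic overfitting. The regularized run below trades a higher
# converged training loss (~0.055) for a smoother decision boundary.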
# Second experiment: train the same network on the regularized total loss.
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
TRAINING_STEPS = 40000
with tf.Session() as sess:
    tf.global_variables_initializer().run()  # re-initialize; training starts from scratch
    for i in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict={x: data, y_: label})
        if i % 2000 == 0:
            print("After %d steps, loss: %f" % (
                i, sess.run(loss, feed_dict={x: data, y_: label})))

    xx, yy = np.mgrid[-1:1:.01, 0:2:.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)

plt.scatter(data[:, 0], data[:, 1], c=label.ravel(),
            cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
plt.contour(xx, yy, probs, levels=[.5], cmap="Greys", vmin=0, vmax=.1)
plt.show()
'''
After 0 steps, loss: 3.600181
After 2000 steps, loss: 0.139405
After 4000 steps, loss: 0.097296
After 6000 steps, loss: 0.069810
After 8000 steps, loss: 0.059866
After 10000 steps, loss: 0.055504
After 12000 steps, loss: 0.054992
After 14000 steps, loss: 0.054985
After 16000 steps, loss: 0.054981
After 18000 steps, loss: 0.054980
After 20000 steps, loss: 0.054979
After 22000 steps, loss: 0.054979
After 24000 steps, loss: 0.054979
After 26000 steps, loss: 0.054979
After 28000 steps, loss: 0.054979
After 30000 steps, loss: 0.054979
After 32000 steps, loss: 0.054979
After 34000 steps, loss: 0.054979
After 36000 steps, loss: 0.054979
After 38000 steps, loss: 0.054979
'''
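# Note: this script targets TensorFlow 1.x (tf.placeholder and tf.contrib
# were removed in 2.x). A rough sketch of the same regularized network in
# the Keras API -- an illustrative assumption, not part of the original.
# Keras' l2(l) penalty is l * sum(w**2) with no 1/2 factor, so 0.0015
# matches the 0.003 coefficient used above:
#
#     from tensorflow import keras
#     reg = keras.regularizers.l2(0.0015)
#     model = keras.Sequential([
#         keras.layers.Dense(10, activation="elu", kernel_regularizer=reg,
#                            input_shape=(2,)),
#         keras.layers.Dense(5, activation="elu", kernel_regularizer=reg),
#         keras.layers.Dense(3, activation="elu", kernel_regularizer=reg),
#         keras.layers.Dense(1, kernel_regularizer=reg),
#     ])
#     model.compile(optimizer=keras.optimizers.Adam(0.001), loss="mse")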