[Peking University] Artificial Intelligence Practice: TensorFlow Notes 4-5

generateds.py         generates the dataset

import numpy as np
import matplotlib.pyplot as plt

seed = 2

def generateds():
    # draw 300 points from a standard normal distribution
    rdm = np.random.RandomState(seed)
    X = rdm.randn(300, 2)
    # label 1 (red) if the point lies inside the circle x0^2 + x1^2 = 2, else 0 (blue)
    Y_ = [int(x0 * x0 + x1 * x1 < 2) for (x0, x1) in X]
    Y_c = [['red' if y else 'blue'] for y in Y_]
    # reshape: -1 lets NumPy infer the row count from the given number of columns
    X = np.vstack(X).reshape(-1, 2)
    Y_ = np.vstack(Y_).reshape(-1, 1)
    return X, Y_, Y_c
# def draw(X, Y_c):
#     # scatter the points, colored by their labels
#     plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
#     plt.show()

# X, Y_, Y_c = generateds()
# draw(X, Y_c)
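A quick sanity check of the generated data (a minimal sketch; it assumes only that the file above is saved as generateds.py):

import generateds

X, Y_, Y_c = generateds.generateds()
print(X.shape, Y_.shape)  # expected: (300, 2) (300, 1)
# every label should follow the circle rule x0^2 + x1^2 < 2
assert all(int(x0 * x0 + x1 * x1 < 2) == int(y)
           for (x0, x1), y in zip(X, Y_.ravel()))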
 

forward.py  builds the network and defines its structure

import tensorflow as tf

def forward(x, regularizer):
    # input x and the regularization weight; the hidden layer uses ReLU activation
    w1 = get_weight([2, 11], regularizer)   # weights
    b1 = get_bias([11])                     # biases
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([11, 1], regularizer)
    b2 = get_bias([1])
    y = tf.matmul(y1, w2) + b2              # no activation on the output layer
    return y

def get_weight(shape, regularizer):
    # initialize the weights with random normal values
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    # register the L2 penalty for this weight matrix in the 'losses' collection
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    # initialize the biases to a small constant
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b
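To see what get_weight registers, build the graph once and inspect the 'losses' collection; a minimal standalone sketch (run in a fresh process so the collection starts empty):

import tensorflow as tf
import forward

x = tf.placeholder(tf.float32, shape=(None, 2))
y = forward.forward(x, regularizer=0.01)
# one L2 penalty per weight matrix (w1 and w2)
print(len(tf.get_collection('losses')))  # expected: 2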

backward.py  backpropagation; trains the parameters

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import generateds
import forward

# hyperparameters
STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999   # decay rate of the exponential learning-rate schedule
REGULARIZER = 0.01

def backward():
    x = tf.placeholder(tf.float32, shape=(None, 2))
    y_ = tf.placeholder(tf.float32, shape=(None, 1))
    X, Y_, Y_c = generateds.generateds()
    y = forward.forward(x, REGULARIZER)
    # global step counter (not trainable)
    global_step = tf.Variable(0, trainable=False)
    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        300 / BATCH_SIZE,    # decay once per epoch (300 samples)
        LEARNING_RATE_DECAY,
        staircase=True
    )

    # loss: mean squared error plus the L2 regularization terms
    loss_mse = tf.reduce_mean(tf.square(y - y_))
    loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))

    # training op: Adam on the regularized loss; passing global_step lets
    # the counter advance so the learning rate actually decays
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(
        loss_total, global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(STEPS):
            # cycle through the 300 samples in batches of BATCH_SIZE
            start = (i * BATCH_SIZE) % 300
            end = start + BATCH_SIZE
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
            if i % 2000 == 0:
                loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
                print('After %d steps, loss is: %f' % (i, loss_v))
        # grid of points covering [-3, 3] x [-3, 3]
        xx, yy = np.mgrid[-3:3:.01, -3:3:.01]
        grid = np.c_[xx.ravel(), yy.ravel()]
        # evaluate the network on every grid point
        probs = sess.run(y, feed_dict={x: grid})
        probs = probs.reshape(xx.shape)

    plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
    plt.contour(xx, yy, probs, levels=[.5])   # decision boundary at y = 0.5
    plt.show()

if __name__ == '__main__':
    backward()
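With staircase=True, tf.train.exponential_decay multiplies the base rate by LEARNING_RATE_DECAY once every 300/BATCH_SIZE = 10 parameter updates, i.e. once per pass over the 300 samples. A minimal sketch of the value the schedule reaches by the end of training (plain arithmetic, reusing the hyperparameters above; no TensorFlow required):

STEPS = 40000
BATCH_SIZE = 30
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.999

decay_steps = 300 / BATCH_SIZE                  # 10 updates per epoch
exponent = STEPS // decay_steps                 # staircase = floor division
final_lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** exponent
print(final_lr)                                 # 0.001 * 0.999**4000 ≈ 1.83e-05

Note that this only holds because minimize() is given global_step; without it the counter never advances and the learning rate stays at LEARNING_RATE_BASE.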

Result:

[Figure: the 300 sample points colored by label, with the fitted decision boundary drawn at y = 0.5]

 

 
