tensorflow随笔-非线性回归

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 10:54:53 2018


非线性回归 y = a*x1^3 + b*x2^3 + c
单样本
"""
import tensorflow as tf
import numpy as np

trainCount=350
g=tf.Graph()
with g.as_default():

    def getWeights(shape,wname):
        weights=tf.Variable(tf.truncated_normal(shape,stddev=0.1),name=wname)
        return weights

    def getBias(shape,bname):
        biases=tf.Variable(tf.constant(0.1,shape=shape),name=bname)
        return biases

    def inference(x):
        result=tf.add(tf.matmul(tf.pow(x,3),w),b)
        return result

    def loss(x,y):
        yp=inference(x)
        return tf.multiply(tf.reduce_sum(tf.squared_difference(y,yp)),0.5)

    def train(learningRate,trainLoss,trainStep):
        trainOp=tf.train.GradientDescentOptimizer(learningRate).minimize(trainLoss,global_step=trainStep)
        return trainOp

    def evaluate(x):
        return inference(x)

    def accuracy(x,y):
        yp=inference(x)
        return tf.subtract(1.0,tf.reduce_mean(tf.divide(tf.abs(yp-y),y)))

    def inputs(n):
        sampleX=np.array(np.random.rand(n,2),dtype=np.float32)
        sampleb1=5.
        samplew=np.array([0.5,0.9],dtype=np.float32)
        sampleY=np.matmul(pow(sampleX,3),samplew)+sampleb1
        return (sampleX,sampleY)

    with tf.name_scope("variables"):
        w=getWeights([2,1],"w")
        b=getBias((),"b") 
        trainStep=tf.Variable(0,dtype=tf.int32,name="step")       

    with tf.name_scope("inputDatas"):
        x=tf.placeholder(dtype=tf.float32,shape=[None,2],name="input_x")
        y=tf.placeholder(dtype=tf.float32,shape=[None],name="input_y")

    init=tf.global_variables_initializer()     
with tf.Session(graph=g) as sess:
    sess.run(init)


  sampleX,sampleY=inputs(100)
    sampleCount=sampleX.shape[0]

    testX,testY=inputs(5)
    testCount=testX.shape[0]

    trainLoss=loss(x,y)

    accuracyOp=accuracy(sampleX,sampleY)
    inputX=sampleX
    inputY=sampleY
    print inputX.shape
    print inputY.shape
    trainOp=train(0.25,trainLoss,trainStep)
    while trainStep.eval()正确率%g"%(nowStep,validate_acc)
            if nowStep>trainCount:
                break
    print "w:",sess.run(w)  
    print "b:",sess.run(b)  
    print "测试样本正确率%g"%sess.run(accuracy(testX,testY))

(100, 2)
(100,)
50次后=>正确率0.941076
100次后=>正确率0.942413
150次后=>正确率0.943086
200次后=>正确率0.943109
250次后=>正确率0.943165
300次后=>正确率0.943153
350次后=>正确率0.943156
w: [[0.5005716]
[0.8993188]]
b: 5.000005
测试样本正确率0.950526

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 10:54:53 2018

非线性回归
单样本
"""
import tensorflow as tf
import numpy as np

trainCount=50
g=tf.Graph()
with g.as_default():

    with tf.name_scope("variables"):
        w=tf.Variable(tf.zeros([2,1]),name="w",dtype=tf.float32)
        b=tf.Variable(0.,dtype=tf.float32,name="b")       

    with tf.name_scope("inputDatas"):
        x=tf.placeholder(dtype=tf.float32,shape=[None,2],name="input_x")
        y=tf.placeholder(dtype=tf.float32,shape=[None],name="input_y")

    def inference(x):
        result=tf.add(tf.matmul(tf.pow(x,2),w),b)
        return result

    def loss(x,y):
        yp=inference(x)
        return tf.reduce_sum(tf.squared_difference(y,yp))

    def train(learningRate,trainLoss):
        trainOp=tf.train.GradientDescentOptimizer(learningRate).minimize(trainLoss)
        return trainOp

    def evaluate(x):
        return inference(x)

    def accuracy(x,y):
        yp=inference(x)
        return tf.subtract(1.0,tf.reduce_mean(tf.divide(tf.abs(yp-y),y)))

    def inputs(n):
        sampleX=np.array(np.random.rand(n,2),dtype=np.float32)
        sampleb1=5.
        samplew=np.array([0.5,0.9],dtype=np.float32)
        b2=np.array(np.random.rand(n),dtype=np.float32)
        sampleY=np.matmul(pow(sampleX,2),samplew)+sampleb1+b2
        return (sampleX,sampleY)

    init=tf.global_variables_initializer()     
with tf.Session(graph=g) as sess:
    sess.run(init)

    sampleX,sampleY=inputs(100)
    sampleCount=sampleX.shape[0]

    testX,testY=inputs(5)
    testCount=testX.shape[0]

    trainLoss=loss(x,y)
    trainOp=train(0.25,trainLoss)
    accuracyOp=accuracy(sampleX,sampleY)
    inputX=sampleX
    inputY=sampleY
    print inputX.shape
    print inputY.shape
    for trainStep in xrange(trainCount): 
        if trainStep%5==0:
            validate_acc=sess.run(accuracyOp)
            print "%d次后=>正确率%g"%(trainStep,validate_acc)
        for i in xrange(sampleCount):
            inputX=np.array([sampleX[i]],dtype=np.float32)
            inputY=np.array([sampleY[i]],dtype=np.float32)
            sess.run(trainOp,feed_dict={x:inputX,y:inputY})
    print "w:",sess.run(w)  
    print "b:",sess.run(b)  
    print "测试样本正确率%g"%sess.run(accuracy(testX,testY))

(100, 2)
(100,)
0次后=>正确率0
5次后=>正确率0.927204
10次后=>正确率0.927204
15次后=>正确率0.927204
20次后=>正确率0.927204
25次后=>正确率0.927204
30次后=>正确率0.927204
35次后=>正确率0.927204
40次后=>正确率0.927204
45次后=>正确率0.927204
w: [[0.4828106 ]
[0.82115054]]
b: 5.412575
测试样本正确率0.956847
单样本训练

你可能感兴趣的:(AI)