1. Installing TensorFlow on Linux and troubleshooting common problems
http://blog.csdn.net/levy_cui/article/details/51251095
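After installing, a quick sanity check from Python (TF 1.x-style API, matching the rest of these notes) confirms that the library imports and a session runs; this snippet is an illustration, not part of the linked guide:

import tensorflow as tf

print(tf.__version__)                       # installed version
hello = tf.constant('Hello, TensorFlow!')
with tf.Session() as sess:
    print(sess.run(hello))                  # b'Hello, TensorFlow!'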
2. TensorFlow basics: the construction phase + the execution phase
demo
import tensorflow as tf

# Create variables.
w = tf.Variable([[0.5, 1.0]])
x = tf.Variable([[2.0], [1.0]])
y = tf.matmul(w, x)

# Variables have to be explicitly initialized before you can run ops.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    print(y.eval())    # [[ 2.]], dtype float32

# Tensor-creation ops; the "==>" comments show the expected values.
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])    # 'tensor' is [[1, 2, 3], [4, 5, 6]]
tf.zeros([3, 4], tf.int32)     # ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
tf.zeros_like(tensor)          # ==> [[0, 0, 0], [0, 0, 0]]
tf.ones([2, 3], tf.int32)      # ==> [[1, 1, 1], [1, 1, 1]]
tf.ones_like(tensor)           # ==> [[1, 1, 1], [1, 1, 1]]

# Constant 1-D tensor populated with a value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7])     # ==> [1 2 3 4 5 6 7]
# Constant 2-D tensor populated with the scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3])        # ==> [[-1. -1. -1.] [-1. -1. -1.]]
tf.linspace(10.0, 12.0, 3, name="linspace")     # ==> [ 10.0  11.0  12.0]
start, limit, delta = 3, 18, 3                  # 'start' is 3, 'limit' is 18, 'delta' is 3
tf.range(start, limit, delta)                   # ==> [3, 6, 9, 12, 15]

# Random tensors.
norm = tf.random_normal([2, 3], mean=-1, stddev=4)
# Shuffle the first dimension of a tensor.
c = tf.constant([[1, 2], [3, 4], [5, 6]])
shuff = tf.random_shuffle(c)
# Each time we run these ops, different results are generated.
sess = tf.Session()
print(sess.run(norm))
print(sess.run(shuff))

# A counter variable updated with tf.assign.
state = tf.Variable(0)
new_value = tf.add(state, tf.constant(1))
update = tf.assign(state, new_value)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(state))
    for _ in range(3):
        sess.run(update)    # state becomes 1, 2, 3 over the three updates

# tf.train.Saver: save variables to disk.
w = tf.Variable([[0.5, 1.0]])
x = tf.Variable([[2.0], [1.0]])
y = tf.matmul(w, x)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init_op)
    # Do some work with the model.
    # Save the variables to disk.
    save_path = saver.save(sess, "C://tensorflow//model//test")
    print("Model saved in file: ", save_path)

# Convert a numpy array to a tensor.
import numpy as np
a = np.zeros((3, 3))
ta = tf.convert_to_tensor(a)
with tf.Session() as sess:
    print(sess.run(ta))

# Placeholders: values are fed in at run time through feed_dict.
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    print(sess.run([output], feed_dict={input1: [7.], input2: [2.]}))
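The Saver demo above only saves the variables. A minimal sketch of restoring them in a fresh session might look like the following; the checkpoint path simply mirrors the one used above, and the rebuilt graph is assumed to match the saved one:

import tensorflow as tf

w = tf.Variable([[0.5, 1.0]])
x = tf.Variable([[2.0], [1.0]])
y = tf.matmul(w, x)
saver = tf.train.Saver()

with tf.Session() as sess:
    # restore() loads the saved values, so no explicit initialization is needed.
    saver.restore(sess, "C://tensorflow//model//test")   # same path as in the save demo
    print(sess.run(y))    # [[ 2.]]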
Linear regression demo
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Randomly generate 1000 points scattered around the line y = 0.1x + 0.3.
num_points = 1000
vectors_set = []
for i in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vectors_set.append([x1, y1])

# Split out the samples.
x_data = [v[0] for v in vectors_set]
y_data = [v[1] for v in vectors_set]
plt.scatter(x_data, y_data, c='r')
plt.show()

# 1-D weight W, initialized uniformly at random in [-1, 1].
with tf.name_scope('weight'):
    W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')
# 1-D bias b, initialized to 0.
with tf.name_scope('bias'):
    b = tf.Variable(tf.zeros([1]), name='b')
# Compute the predicted value y.
with tf.name_scope('y'):
    y = W * x_data + b
# Use the mean squared error between the prediction y and the actual y_data as the loss.
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - y_data), name='loss')

# Optimize the parameters with gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5)
# Training is just minimizing this loss.
train = optimizer.minimize(loss, name='train')

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# Initial values of W and b.
print("W =", sess.run(W), "b =", sess.run(b), "loss =", sess.run(loss))

# Run 20 training steps.
for step in range(20):
    sess.run(train)
    # Print the trained W and b after each step.
    print("W =", sess.run(W), "b =", sess.run(b), "loss =", sess.run(loss))

# tf.train.SummaryWriter was renamed; in TF 1.x the graph is written with tf.summary.FileWriter.
writer = tf.summary.FileWriter("logs/", sess.graph)

plt.scatter(x_data, y_data, c='r')
plt.plot(x_data, sess.run(W) * x_data + sess.run(b))
plt.show()
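To actually see the training curve in TensorBoard, the loss can also be logged as a scalar summary. A minimal sketch, reusing loss, train and sess from the demo above (the tag name 'loss' and the logs/ directory are carried over from it, everything else is an assumption):

# Assumes loss, train and sess from the linear regression demo are still live.
loss_summary = tf.summary.scalar('loss', loss)    # log the loss as a scalar
writer = tf.summary.FileWriter("logs/", sess.graph)

for step in range(20):
    _, summary = sess.run([train, loss_summary])
    writer.add_summary(summary, step)             # one point on the curve per step
writer.close()

After training, running `tensorboard --logdir=logs/` serves the graph and the loss curve in the browser.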
3. Tensors. The core unit of data in TensorFlow is the tensor.
A TensorFlow program can be divided into two parts: building the computation graph (the construction phase) and running the computation graph (the execution phase).
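A minimal sketch of the two phases, in the TF 1.x graph-and-session style used throughout these notes:

import tensorflow as tf

# Construction phase: build the graph; nothing is computed yet.
a = tf.constant(3.0)
b = tf.constant(4.0)
c = a * b            # just a node in the graph, not the value 12.0

# Execution phase: run the graph inside a session.
with tf.Session() as sess:
    print(sess.run(c))    # 12.0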