[Machine Learning in Practice] 9_deeplearning: Hands-On Machine Learning with Scikit-Learn & TensorFlow

Getting started with deep learning

# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 17:33:43 2019

@author: Administrator
"""

# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os
import tensorflow as tf

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

# To plot pretty figures (note: %matplotlib inline is a Jupyter/IPython magic;
# remove it when running this file as a plain Python script)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "tensorflow"

def save_fig(fig_id, tight_layout=True):
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
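
# plt.savefig() fails if the output directory does not exist, so create it first
# (a small addition, reusing PROJECT_ROOT_DIR and CHAPTER_ID defined above):
fig_dir = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
if not os.path.isdir(fig_dir):
    os.makedirs(fig_dir)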

#reset_graph()

x = tf.Variable(3, name="x")
y = tf.Variable(4, name="y")
f = x*x*y + y + 2
f

# Initialize the variables manually
sess = tf.Session()
sess.run(x.initializer)
sess.run(y.initializer)
result = sess.run(f)
result #42

sess.close()


with tf.Session() as sess:
    x.initializer.run()
    y.initializer.run()
    result = f.eval()
# f.eval() evaluates the tensor in the default session set by the with block;
# it is equivalent to tf.get_default_session().run(f)
result #42


init = tf.global_variables_initializer()
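# tf.InteractiveSession() installs itself as the default session, so init.run() and
# f.eval() work without a with block, but the session has to be closed manually.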
sess = tf.InteractiveSession()
init.run()
result = f.eval()
print(result)  # 42
sess.close()

result #42

# Managing graphs
reset_graph()
x1 = tf.Variable(1)
x1.graph is tf.get_default_graph()  # True

graph = tf.Graph()
with graph.as_default():
    x2 = tf.Variable(2)
x2.graph is graph  # True

x2.graph is tf.get_default_graph()  # False
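
# Every new node is added to the default graph unless another graph is explicitly made
# the default (as with graph.as_default() above). In a notebook, re-running a cell keeps
# adding duplicate nodes, which is why reset_graph() calls tf.reset_default_graph().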

w = tf.constant(3)
x = w + 2
y = x + 5
z = x * 3

# Each eval() call runs the graph separately, so the values of w and x are recomputed
# and not reused between the two evaluations
with tf.Session() as sess:
    print('y:',y.eval())
    print('z:',z.eval())
    
# To evaluate y and z efficiently, without computing w and x twice as above, ask
# TensorFlow to evaluate both of them in a single graph run
with tf.Session() as sess:
    y_val,z_val = sess.run([y,z])
    print('y:',y_val)
    print('z:',z_val)
    
    
#Linear Regression

import numpy as np
from sklearn.datasets import fetch_california_housing

reset_graph()

housing = fetch_california_housing()
dir(housing)
housing.feature_names
m, n = housing.data.shape
housing_data_plus_bias = np.c_[np.ones((m,1)),housing.data]

# np.ones((m,1)) is an m x 1 column of ones, prepended as the bias feature (x0 = 1)
X = tf.constant(housing_data_plus_bias,dtype=tf.float32,name='X')
y = tf.constant(housing.target.reshape(-1,1),dtype=tf.float32,name='y')
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT,X)),XT),y) # Normal Equation: theta = (X^T X)^(-1) X^T y; tf.matmul() is matrix multiplication

with tf.Session() as sess:
    theta_value = theta.eval()
    

theta_value

X = housing_data_plus_bias
X.shape
y = housing.target.reshape(-1,1)
y.shape
theta_numpy = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)

print(theta_numpy)

from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing.data,housing.target.reshape(-1,1))

print(np.r_[lin_reg.intercept_.reshape(-1,1),lin_reg.coef_.T])
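
# All three approaches (TensorFlow, NumPy, and Scikit-Learn) solve the same Normal
# Equation, so the three theta vectors should agree up to floating-point precision.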

# Using Batch Gradient Descent
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m,1)),scaled_housing_data]

print(scaled_housing_data_plus_bias.mean(axis=0))
print(scaled_housing_data_plus_bias.mean(axis=1))
print(scaled_housing_data_plus_bias.mean())
print(scaled_housing_data_plus_bias.shape)
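
# Gradient Descent needs scaled features: without standardization, features with very
# different ranges dominate the gradient and training converges much more slowly.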


# Manually computing the gradients
reset_graph()
n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32,name='X')
y = tf.constant(housing.target.reshape(-1,1), dtype=tf.float32,name='y')

theta = tf.Variable(tf.random_uniform([n+1,1],-1.0,1.0,seed=42),name='theta')
y_pred = tf.matmul(X,theta,name='predictions')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error),name='mse')
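# Hand-derived gradient of the MSE with respect to theta: (2/m) * X^T * (X*theta - y)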
gradients = 2/m*tf.matmul(tf.transpose(X),error)
training_op = tf.assign(theta,theta - learning_rate*gradients)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    
    for epoch in range(n_epochs):
        if epoch%100 ==0:
            print('Epoch',epoch,'MSE=',mse.eval())
        sess.run(training_op)
        
    best_theta = theta.eval()
    
    
best_theta
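# Note: this theta was fit on the standardized features, so its values are not directly
# comparable to the Normal Equation theta obtained from the unscaled data above.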

# Using autodiff
reset_graph()
n_epochs = 1000
learning_rate = 0.01

X = tf.constant(scaled_housing_data_plus_bias,dtype=tf.float32,name='X')
y = tf.constant(housing.target.reshape(-1,1),dtype=tf.float32,name='y')
theta = tf.Variable(tf.random_uniform([n+1,1],-1.0,1.0,seed=42),name='theta')
y_pred = tf.matmul(X,theta,name='predictions')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error),name='mse')

gradients = tf.gradients(mse,[theta])[0]
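# tf.gradients() uses reverse-mode autodiff to add ops that compute the gradient of mse
# with respect to each variable in the list; no hand-derived formula is needed here.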

training_op = tf.assign(theta,theta - learning_rate*gradients)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    
    for epoch in range(n_epochs):
        if epoch%100 ==0:
            print('Epoch',epoch,'MSE=',mse.eval())
        sess.run(training_op)
        
    best_theta = theta.eval()
    
print('Best theta:',best_theta)
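
# The update step can also be delegated to one of TensorFlow's built-in optimizers
# instead of calling tf.gradients() and tf.assign() by hand. A minimal sketch, reusing
# the X, y, theta and mse nodes defined in the autodiff section above:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE=', mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()

print('Best theta:', best_theta)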
