Building, saving, and loading a neural network with TensorFlow and inspecting its weight values, including data filtering and normalization

First comes the data processing: create a wavelet.py file to apply wavelet filtering to the data.

import pywt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def Wavelet(data):
    w = pywt.Wavelet('db8')  # use the Daubechies 8 wavelet

    maxlev = pywt.dwt_max_level(len(data), w.dec_len)
    maxlev = 1     # number of decomposition levels actually used (overrides the maximum above)
    threshold = 1  # threshold for filtering

    # Decompose into wavelet components, to the level selected:
    coeffs = pywt.wavedec(data, 'db8', level=maxlev)  # multilevel decomposition using wavedec

    for i in range(1, len(coeffs)):
        coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i]))  # threshold out the noise

    datarec = pywt.waverec(coeffs, 'db8')  # reconstruct the signal from the thresholded coefficients

    return datarec
if __name__ == "__main__":
    # quick test
    df = pd.read_excel(r'E:\新建文件夹\0906.xls')
    df = df.drop(["序号"], axis=1)
    # select the columns of interest
    allParameters = df.loc[:, ["数据1", "数据2", "数据3", "数据4", "数据5", "数据6", "数据7"]]
    data = allParameters.values
    # select the usable rows
    rhoRaw = np.hstack((data[288:822, 0], data[1547:2185, 0], data[2880:3396, 0], data[4100:4487, 0]))

    rhoFiltered = Wavelet(rhoRaw)

    plt.plot(rhoRaw)
    plt.plot(rhoFiltered)

    plt.show()
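pywt.threshold defaults to soft thresholding: detail coefficients smaller in magnitude than the threshold are zeroed and the remaining ones are shrunk toward zero, which is what removes the noise in the filter above. A small standalone illustration of the two common modes (independent of the data file used in the script):

import numpy as np
import pywt

coeffs = np.array([-3.0, -0.5, 0.2, 1.0, 4.0])
# 'soft' (the default) zeroes small coefficients and shrinks the rest toward zero
print(pywt.threshold(coeffs, 1.0, mode='soft'))
# 'hard' only zeroes the small coefficients and leaves the rest untouched
print(pywt.threshold(coeffs, 1.0, mode='hard'))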

Next, create a normalize.py file to normalize the data.

import numpy as np


def Normalize(data):
    # Column-wise min-max normalization to [0, 1]. Also returns "criterion"
    # (row of maxima stacked on row of minima) so the same scaling can be
    # reapplied (NormalizeWithCriterion) or inverted (InverseNormalize).
    row = data.shape[0]
    col = data.shape[1]
    mx = np.ones((1, col)) * (-1) * 10e6    # running column maxima
    mn = np.ones((1, col)) * 10e6           # running column minima

    data_n = np.zeros((row, col))

    for j in range(col):
        for i in range(row):
            if data[i, j] > mx[0, j]:
                mx[0, j] = data[i, j]
            if data[i, j] < mn[0, j]:
                mn[0, j] = data[i, j]

    for j in range(col):
        for i in range(row):
            data_n[i, j] = (data[i, j] - mn[0, j]) / (mx[0, j] - mn[0, j])

    criterion = np.vstack((mx, mn))

    return data_n, criterion

def NormalizeWithCriterion(data, criterion):
    # Normalize new data using a previously computed criterion (max/min rows).
    row = data.shape[0]
    col = data.shape[1]
    mx = criterion[0, :]
    mn = criterion[1, :]

    data_n = np.zeros((row, col))
    for j in range(col):
        for i in range(row):
            data_n[i, j] = (data[i, j] - mn[j]) / (mx[j] - mn[j])

    return data_n

def InverseNormalize(data, criterion):
    # Map normalized data back to the original scale using the stored criterion.
    row = data.shape[0]
    col = data.shape[1]
    mx = criterion[0, :]
    mn = criterion[1, :]
    data_r = np.zeros((row, col))
    for j in range(col):
        for i in range(row):
            data_r[i, j] = (mx[j] - mn[j]) * data[i, j] + mn[j]

    return data_r


if __name__ == "__main__":
    # quick test
    test = np.array([[1, 2.1, 5, 9.5],[2, -3.4, -5, 6.5],[3, 4, 5, -8],[2, -3, 6, 0]])
    t, c = Normalize(test)
    tt = InverseNormalize(t, c)

    print(test)
    print(t)
    print(tt)
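The explicit double loops above make the logic easy to follow but are slow on large arrays. For reference, here is a vectorized sketch of the same column-wise min-max scaling (an alternative formulation, not part of the original file):

import numpy as np

def NormalizeVectorized(data):
    # column-wise min-max scaling to [0, 1]; criterion stacks max over min,
    # matching the layout expected by NormalizeWithCriterion / InverseNormalize
    mx = data.max(axis=0, keepdims=True)
    mn = data.min(axis=0, keepdims=True)
    data_n = (data - mn) / (mx - mn)
    return data_n, np.vstack((mx, mn))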

Create main_program.py to train the neural network and save it.

import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
from wavelet import Wavelet
from normalize import *
import pickle


df = pd.read_excel(r'E:\新建文件夹\0906.xls')
df = df.drop(["序号"], axis=1)
# select the columns to use (this fixes the data dimensionality)
allParameters = df.loc[:, ["数据1", "数据2", "数据3", "数据4", "数据5", "数据6"]]
# convert the DataFrame to a NumPy array
dataAll = allParameters.values
# correlation matrix, for a quick look at how the columns are related
corr = allParameters.corr()
# select the usable rows
data = np.vstack((dataAll[288:822, :], dataAll[1547:2185, :], dataAll[2880:3396, :], dataAll[4100:4487, :]))
# wavelet filtering; the reconstruction is one sample longer than the
# odd-length input here, hence the +1 in the row count
dataFiltered = np.zeros((data.shape[0]+1, data.shape[1]))
for i in range(data.shape[1]):
    dataFiltered[:, i] = Wavelet(data[:, i])

# normalization
dataFiltered_n, criterion = Normalize(dataFiltered)
# save the normalization criterion: open the target file in binary write mode
f = open("normalizeCriterion.txt", 'wb')
# pickle the criterion into the file
pickle.dump(criterion, f)
# close the file
f.close()
# column 0 is the training target (rho); the remaining columns are the inputs
rho_t = dataFiltered_n[:, 0]
rho_t = rho_t[..., np.newaxis]
inptData = dataFiltered_n[:, 1:]
n_features = inptData.shape[1]    # dimensionality of the input data
nodeQuantityL1 = 30    # number of hidden-layer nodes
nodeQuantityL2 = 1     # number of output nodes, equal to the output dimensionality
lr = 0.01     # learning rate
ep = 50000    # number of backpropagation training iterations

sess = tf.Session()
inpt = tf.placeholder(tf.float64, [None, n_features], name="input")
outpt = tf.placeholder(tf.float64, [None, 1], name="desiredOutput")
with tf.variable_scope('network'):
    w_initializer = tf.random_normal_initializer(0, 0.3, dtype='float64')
    b_initializer = tf.constant_initializer(0.1, dtype='float64')

    with tf.variable_scope('layer1'):
        w1 = tf.get_variable('w1', [n_features, nodeQuantityL1], initializer=w_initializer, dtype='float64')
        b1 = tf.get_variable('b1', [1, nodeQuantityL1], initializer=b_initializer, dtype='float64')
        l1 = tf.nn.sigmoid(tf.matmul(inpt, w1) + b1)

    with tf.variable_scope('layer_output'):
        w2 = tf.get_variable('w2', [nodeQuantityL1, nodeQuantityL2], initializer=w_initializer, dtype='float64')
        b2 = tf.get_variable('b2', [1, nodeQuantityL2], initializer=b_initializer, dtype='float64')
        output = tf.nn.sigmoid(tf.matmul(l1, w2) + b2, name="predictOutput")

    with tf.variable_scope('loss'):
        loss = tf.reduce_mean(tf.squared_difference(output, outpt))

    with tf.variable_scope('train'):
        train_optimizer = tf.train.AdamOptimizer(lr).minimize(loss)

sess.run(tf.global_variables_initializer())
for i in range(ep):
    sess.run(train_optimizer, feed_dict={inpt:inptData, outpt:rho_t})
    if i % 100 == 0:
        print(i, "/", ep, sess.run(loss, feed_dict={inpt:inptData, outpt:rho_t}))

# save the model
saver = tf.train.Saver()
saver.save(sess, "Model/model.ckpt")
# run the model on the training inputs and de-normalize the predictions
rho_p = InverseNormalize(sess.run(output, feed_dict={inpt:inptData}), criterion)

# compare the (filtered) true values with the predictions
plt.plot(dataFiltered[:, 0])
plt.plot(rho_p)

plt.show()


print("Press any key to continue......")

Create restore_sess.py to load and call the model just trained.

import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
from wavelet import Wavelet
from normalize import *
import pickle


# load the normalization criterion
f = open("normalizeCriterion.txt", 'rb')
criterion = pickle.load(f)
f.close()

# load the neural network model
sess = tf.Session()
saver = tf.train.import_meta_graph('Model/model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('./Model'))
# Fetch the model's input and output tensors by name: "input" is the name given to
# inpt = tf.placeholder(tf.float64, [None, n_features], name="input") in the trained model,
# and "network/layer_output/predictOutput" is the name of
# output = tf.nn.sigmoid(tf.matmul(l1, w2) + b2, name="predictOutput").
# The trailing ":0" is required.
new_x = tf.get_default_graph().get_tensor_by_name("input:0")
new_y_ = tf.get_default_graph().get_tensor_by_name("network/layer_output/predictOutput:0")

Y = []
# Build one input row; column 0 (the predicted quantity) is a dummy value,
# since only columns 1: are fed to the network.
inptData = np.ones((1, 6))
inptData[0, 1] = 15
inptData[0, 2] = -10
inptData[0, 3] = 10
inptData[0, 4] = 1.5e4
inptData[0, 5] = 0
# sweep the last input from 0 to 1 and record the de-normalized prediction
for i in range(101):
    inptData[0, 5] = 0.01 * i
    inptData_n = NormalizeWithCriterion(inptData, criterion)

    y_1 = InverseNormalize(sess.run(new_y_, feed_dict={new_x: inptData_n[:, 1:]}), criterion)

    print(i, " ", y_1)
    Y.append(y_1[0])


plt.plot(Y)

plt.show()

print("Press any key to continue......")

Finally, view the network weights.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os


checkpoint_path = os.path.join('./Model', "model.ckpt")
reader = tf.train.NewCheckpointReader(checkpoint_path)
# map of every variable name stored in the checkpoint to its shape
all_variables = reader.get_variable_to_shape_map()
print(all_variables)
# read the weight and bias tensors by their variable-scope names
w1 = reader.get_tensor("network/layer1/w1")
b1 = reader.get_tensor("network/layer1/b1")
w2 = reader.get_tensor("network/layer_output/w2")
b2 = reader.get_tensor("network/layer_output/b2")
print(w1, b1, w2, b2, sep="\n")
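With w1, b1, w2 and b2 loaded as plain NumPy arrays, the network's forward pass can be reproduced without TensorFlow, which is a handy sanity check on the extracted weights. A minimal sketch; the input row x_n below is a made-up placeholder and would normally be a row normalized with NormalizeWithCriterion:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# x_n: one normalized input row with n_features columns (placeholder values here)
x_n = np.random.rand(1, w1.shape[0])
hidden = sigmoid(x_n @ w1 + b1)          # hidden layer, sigmoid activation
prediction = sigmoid(hidden @ w2 + b2)   # normalized network output
print(prediction)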

 
