CNN data processing

A record of the code I have been studying recently. The overall structure comes from the internet; I made some modifications to it.
This is the CNN part.

import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from random import sample
# Create the computation-graph session (TensorFlow 1.x)
sess = tf.Session()
# Set the model parameters

batch_size = 300  # number of training images per batch
initial_learning_rate = 0.003  # initial learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=30, decay_rate=0.9)
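# For reference, exponential_decay (with staircase left at its default of False) evaluates
#   initial_learning_rate * decay_rate ** (global_step / decay_steps),
# so with the settings above the rate falls from 0.003 to roughly
# 0.003 * 0.9 ** 10 ≈ 0.00105 by step 300. A quick pure-Python check:
# for step in (0, 30, 300):
#     print(step, initial_learning_rate * 0.9 ** (step / 30))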

evaluation_size = 300  # number of test images per evaluation
image_width = 23  # image width
image_height = 300  # image height
target_size = 3  # three target classes (0, 1, 2)
num_channels = 1  # grayscale images, one colour channel
generations = 2000  # number of training iterations
evaluation_step = 10  # evaluate on the test set every ten training steps
conv1_features = 64  # number of filters in the first convolutional layer
conv2_features = 128  # number of filters in the second convolutional layer
max_pool_size1 = 8  # pooling sizes (note: the actual ksize values are hard-coded in my_conv_net)
max_pool_size2 = 8
fully_connected_size = 128  # number of neurons in the fully connected layer

# Declare placeholders
x_input_shape = [batch_size, image_height, image_width, num_channels]
x_input = tf.placeholder(tf.float32, shape=x_input_shape)
y_target = tf.placeholder(tf.int32, shape=[batch_size])

evaluation_input_shape = [evaluation_size, image_height, image_width, num_channels]
evaluation_input = tf.placeholder(tf.float32, shape=evaluation_input_shape)
evaluation_target = tf.placeholder(tf.int32, shape=[evaluation_size])

# Declare the weights and biases of the convolutional layers
# Convolutional layer 1:
# 21x23 filter, 1 input channel, conv1_features output channels
conv1_weight = tf.Variable(tf.truncated_normal([21, 23, 1, conv1_features], stddev=0.1, dtype=tf.float32))
conv1_bias = tf.Variable(tf.truncated_normal([conv1_features], stddev=0.1, dtype=tf.float32))

# Convolutional layer 2:
# 21x1 filter, conv1_features input channels, conv2_features output channels
conv2_weight = tf.Variable(tf.truncated_normal([21, 1, conv1_features, conv2_features], stddev=0.1, dtype=tf.float32))
conv2_bias = tf.Variable(tf.truncated_normal([conv2_features], stddev=0.1, dtype=tf.float32))

# Declare the weights and biases of the fully connected layers
# Width and height of the feature map after the convolution/pooling stack
conv_output_width = 1
conv_output_height = 55
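# How these two values are obtained (VALID padding: out = (in - filter) // stride + 1):
#   conv1 (21x23, stride 1):        300 x 23 -> 280 x 1
#   max_pool1 (10x1, stride 2x1):   280 x 1  -> 136 x 1
#   conv2 (21x1, stride 1):         136 x 1  -> 116 x 1
#   max_pool2 (8x1, stride 2x1):    116 x 1  -> 55 x 1
# which gives conv_output_height = 55 and conv_output_width = 1.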

# Input size of the first fully connected layer
full1_input_size = conv_output_width * conv_output_height * conv2_features
full1_weight = tf.Variable(tf.truncated_normal([full1_input_size, fully_connected_size], stddev=0.1, dtype=tf.float32))
full1_bias = tf.Variable(tf.truncated_normal([fully_connected_size], stddev=0.1, dtype=tf.float32))
full2_weight = tf.Variable(tf.truncated_normal([fully_connected_size, target_size], stddev=0.1, dtype=tf.float32))
full2_bias = tf.Variable(tf.truncated_normal([target_size], stddev=0.1, dtype=tf.float32))

def my_conv_net(input_data):
    # First layer: Conv-ReLU-MaxPool
    conv1 = tf.nn.conv2d(input_data, conv1_weight, strides=[1, 1, 1, 1], padding='VALID')
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias))
    print(conv1)  # debug: inspect the tensor shape
    max_pool1 = tf.nn.max_pool(relu1, ksize=[1, 10, 1, 1],
                               strides=[1, 2, 1, 1], padding='VALID')
    print(max_pool1)  # debug: inspect the tensor shape
    # Second layer: Conv-ReLU-MaxPool
    conv2 = tf.nn.conv2d(max_pool1, conv2_weight, strides=[1, 1, 1, 1], padding='VALID')
    print(conv2)  # debug: inspect the tensor shape
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
    max_pool2 = tf.nn.max_pool(relu2, ksize=[1, 8, 1, 1],
                               strides=[1, 2, 1, 1], padding='VALID')
    print(max_pool2)  # debug: inspect the tensor shape
    # Fully connected layers
    # First flatten the feature map into a [batch_size, N] matrix
    # Get the output shape of the convolutional stack
    conv_output_shape = max_pool2.get_shape().as_list()
    # Input size of the fully connected layer
    fully_input_size = conv_output_shape[1] * conv_output_shape[2] * conv_output_shape[3]  # height, width and channel count of the feature map
    full1_input_data = tf.reshape(max_pool2,
                                  [conv_output_shape[0], fully_input_size])  # reshape to a batch_size x fully_input_size matrix
    # First fully connected layer
    fully_connected1 = tf.nn.relu(tf.add(tf.matmul(full1_input_data, full1_weight), full1_bias))
    # Second fully connected layer (output)
    # Note that the ReLU here clips negative logits before they reach the softmax cross-entropy below
    model_output = tf.nn.relu(
        tf.add(tf.matmul(fully_connected1, full2_weight), full2_bias))  # shape = [batch_size, target_size]
    return model_output

model_output = my_conv_net(x_input)
test_model_output = my_conv_net(evaluation_input)

# Loss function: softmax cross-entropy plus L2 regularization on all weight matrices
reg = 0.0001  # regularization strength
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_output, labels=y_target))
# loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=test_model_output, labels=evaluation_target))
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_output, labels=y_target))+tf.nn.l2_loss(conv1_weight)*reg+\
       tf.nn.l2_loss(conv2_weight)*reg+tf.nn.l2_loss(full1_weight)*reg+tf.nn.l2_loss(full2_weight)*reg
loss1 = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=test_model_output, labels=evaluation_target))+tf.nn.l2_loss(conv1_weight)*reg+\
       tf.nn.l2_loss(conv2_weight)*reg+tf.nn.l2_loss(full1_weight)*reg+tf.nn.l2_loss(full2_weight)*reg
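# In formula form (tf.nn.l2_loss(w) computes sum(w ** 2) / 2):
#   loss = mean(cross_entropy) + reg * (l2(conv1_W) + l2(conv2_W) + l2(full1_W) + l2(full2_W))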
# loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=model_output, onehot_labels=y_target))

# Prediction and evaluation
prediction = tf.nn.softmax(model_output)
test_prediction = tf.nn.softmax(test_model_output)

def get_accuracy(logits, targets):
    batch_predictions = np.argmax(logits, axis=1)  # index of the largest value in each row
    print(batch_predictions)
    print(targets)
    num_correct = np.sum(np.equal(batch_predictions, targets))
    return 100 * num_correct / batch_predictions.shape[0]
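
# A small worked example (made-up numbers): for logits [[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]]
# and targets [1, 2], argmax gives [1, 0]; one of the two predictions matches,
# so get_accuracy returns 50.0.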


def get_ABC_num(logits, targets):
    # Count, for each true class, how the predictions are distributed over the three classes
    judgeA, judgeB, judgeC = {0: 0, 1: 0, 2: 0}, {0: 0, 1: 0, 2: 0}, {0: 0, 1: 0, 2: 0}
    numA, numB, numC = 0, 0, 0
    batch_predictions = np.argmax(logits, axis=1)  # index of the largest value in each row
    for i in range(len(batch_predictions)):
        if targets[i] == 0:
            numA += 1
            judgeA[batch_predictions[i]] += 1
        elif targets[i] == 1:
            numB += 1
            judgeB[batch_predictions[i]] += 1
        else:
            numC += 1
            judgeC[batch_predictions[i]] += 1
    print('========== Comparison ==========')
    print(judgeA, judgeB, judgeC)

# Create the optimizer
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_step = opt.minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)

def mainCnn(train_List, train_judge, test_List, test_judge):
    # Reshape each flattened sample back into a 300x23 image
    train_xdata = np.array([np.reshape(x, [300, 23]) for x in train_List])
    test_xdata = np.array([np.reshape(x, [300, 23]) for x in test_List])

    train_labels = np.array(train_judge)
    test_labels = np.array(test_judge)
    # Start training
    train_loss = []
    test_loss = []
    test_loss2 = []
    train_acc = []
    test_acc = []
    test_acc2 = []
    Learning_rate_vec = []
    for i in range(generations):
        # rand_index = sample(np.random.permutation(np.arange(len(train_xdata))).tolist(),batch_size)
        # print(rand_index)
        # Sample a random training batch (with replacement)
        rand_index = np.random.choice(len(train_xdata), size=batch_size)
        rand_x = train_xdata[rand_index]
        rand_x = np.expand_dims(rand_x, 3)  # add the channel dimension
        rand_y = train_labels[rand_index]
        num0, num1, num2 = 0, 0, 0  # count how many samples of each class are in this batch
        for k in range(len(rand_y)):
            if (rand_y[k] == 0):
                num0 += 1
            if rand_y[k] == 1:
                num1 += 1
            if rand_y[k] == 2:
                num2 += 1
        print(num0, num1, num2)
        # Record the decayed learning rate at this step
        Learning_rate_vec.append(sess.run(learning_rate, feed_dict={global_step: i}))
        train_dict = {x_input: rand_x, y_target: rand_y}

        sess.run(train_step, feed_dict={x_input: rand_x, y_target: rand_y, global_step: i})
        temp_train_loss = sess.run(loss, feed_dict=train_dict)
        temp_train_prediction = sess.run(prediction, feed_dict=train_dict)
        temp_train_acc = get_accuracy(temp_train_prediction, rand_y)
        print('Step', i + 1, 'train accuracy:', temp_train_acc, 'learning rate:', Learning_rate_vec[-1])
        # Evaluate on the test set
        if ((i + 1) % evaluation_step == 0) or (i == 0):
            eval_index = np.random.choice(len(test_xdata), size=evaluation_size)
            eval_x = test_xdata[eval_index]
            eval_x = np.expand_dims(eval_x, 3)
            eval_y = test_labels[eval_index]

            test_dict = {evaluation_input: eval_x, evaluation_target: eval_y}
            temp_test_preds,temp_test_loss = sess.run([test_prediction,loss1] ,feed_dict=test_dict)
            # temp_test_preds = sess.run(test_prediction, feed_dict=test_dict)


            temp_test_acc = get_accuracy(temp_test_preds, eval_y)
            get_ABC_num(temp_test_preds, eval_y)
            print('Test accuracy:', temp_test_acc)
            test_acc.append(temp_test_acc)
            test_acc2.append(temp_test_acc)
            test_loss.append(temp_test_loss)
            test_loss2.append(temp_test_loss)
        else:
            test_acc2.append(0)
            test_loss.append(0)
        train_acc.append(temp_train_acc)
        train_loss.append(temp_train_loss)

    index = np.arange(start=0, stop=generations + 1, step=evaluation_step)  # approximate x positions of the test-set evaluations
    # Plot the loss curves
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(train_loss, 'k-',label='Train loss')
    ax.plot(index,test_loss2,'r--',label='Test loss')
    ax.set_xlabel('Generation')
    ax.set_ylabel('Softmax Loss')
    fig.suptitle('Softmax Loss per Generation')
    # Write the results to files (plain-text files, despite the .xls extension)
    result1 = open('trainData.xls', 'w', encoding='gbk')
    for i in range(0, len(train_acc)):
        result1.write(str(train_acc[i]))
        result1.write('\n')
    result1.close()
    result2 = open('testData.xls', 'w', encoding='gbk')
    for i in range(0, len(test_acc2)):
        result2.write(str(test_acc2[i]))
        result2.write('\n')
    result2.close()
    result3 = open('losstrainData.xls', 'w', encoding='gbk')
    for i in range(0, len(train_loss)):
        result3.write(str(train_loss[i]))
        result3.write('\n')
    result3.close()
    result4 = open('losstestData.xls', 'w', encoding='gbk')
    for i in range(0, len(test_loss)):
        result4.write(str(test_loss[i]))
        result4.write('\n')
    result4.close()


    # Plot the accuracy curves

    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    # Write the training accuracy to a file
    result = open('result.xls', 'w', encoding='gbk')
    print(train_acc)
    for i in range(0,len(train_acc)):
        result.write(str(train_acc[i]))
        result.write('\n')
    result.close()
    ax2.plot(train_acc, 'k-', label='Train Set Accuracy')
    ax2.plot(index, test_acc, 'r--', label='Test Set Accuracy')
    ax2.set_xlabel('Generation')
    ax2.set_ylabel('Accuracy')
    fig2.suptitle('Train and Test Set Accuracy')
    ax2.set_ylim(bottom=0.)

    # Plot the learning rate
    fig4 = plt.figure()
    ax4 = fig4.add_subplot(111)
    ax4.plot(Learning_rate_vec, 'k-')
    ax4.set_xlabel('step')
    ax4.set_ylabel('Learning_rate')
    fig4.suptitle('Learning_rate')

    plt.show()
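
A minimal way to smoke-test mainCnn on its own is sketched below. Here fake_train, fake_test and their label lists are made-up random stand-ins for the real lists produced by the preprocessing code that follows, and generations should be lowered for a quick run; the __main__ guard keeps the test from running when the preprocessing module imports mainCnn.

if __name__ == '__main__':
    # Hypothetical smoke test: random flattened 300x23 samples with labels in {0, 1, 2}
    fake_train = [np.random.rand(300 * 23) for _ in range(600)]
    fake_train_labels = [np.random.randint(0, 3) for _ in range(600)]
    fake_test = [np.random.rand(300 * 23) for _ in range(400)]
    fake_test_labels = [np.random.randint(0, 3) for _ in range(400)]
    mainCnn(fake_train, fake_train_labels, fake_test, fake_test_labels)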

This is the data preprocessing part.

import pandas as pd
import numpy as np
from numpy import *
import random
from newCnnTensorflow import mainCnn
import operator

def splitFrame(data):
    # Split the DataFrame into runs of consecutive index values and
    # return a list of [start_index, end_index] pairs
    num = data.index.values[0]
    split1 = []
    for i in range(len(data.index.values)):
        if i != len(data.index.values) - 1:
            if 1 + data.index.values[i] != data.index.values[i + 1]:
                split1.append([num,data.index.values[i]])
                num = data.index.values[i + 1]
                # print(split1)

        else:
            split1.append([num,data.index.values[i]])
            # print(data.loc[2404:2406])

    return split1
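
# Example with made-up data: for a DataFrame whose index is [0, 1, 2, 10, 11, 12],
# splitFrame returns [[0, 2], [10, 12]], i.e. the [start, end] index pair of each
# run of consecutive index values.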


# Take the rows from start to end and cut them into 300-row blocks with a fixed stride
def dataDeal(data,start,end):
    print(data.loc[start,'judge'])
    # Determine the label from the 'judge' column
    if data.loc[start,'judge']<200:
        num = 0
    elif 200
