Python + TensorFlow: Multi-Class Classification with a Neural Network

After switching to a new computer I lost all my project files, so even a simple CNN multi-class problem now means rewriting everything from scratch, which is a hassle. Here is a quick record of the implementation steps for future reference.

 

1. Using clothing classification as an example: I searched Baidu Images for pictures of various kinds of clothing, cropped them into squares, and sorted them into 5 classes, one folder per class, named 0-4 for convenience.
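The folder layout then looks like this (a sketch based on the description; the figure below shows the actual folders):

D:/photo
├── 0
├── 1
├── 2
├── 3
└── 4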

[Figure 1: the class folders 0-4]

2. Resize each image to a fixed size, mirror-flip it, and threshold both versions before saving (adaptive thresholding is used here; thresholding is optional, and plain grayscale also works). Save this script as Process.py, since step 5 imports it under that name.

(Source path: D:/photo, output path: D:/photos)

import cv2
import os


def Threshold(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    GrayImage = cv2.medianBlur(img, 5)       # median blur to suppress noise
    th = cv2.adaptiveThreshold(GrayImage, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 3, 5)   # adaptive thresholding
    return th


def process(path, save_path):
    x = -1   # os.walk yields the top-level directory first, so start one below 0
    for root, dirs, files in os.walk(path):
        savepath = save_path + '/' + str(x)
        x = x + 1
        if not files:                             # skip directories with no images
            continue
        os.makedirs(savepath, exist_ok=True)      # cv2.imwrite fails silently if the folder is missing
        for index, file in enumerate(files):
            img_path = root + '/' + file
            img = cv2.imread(img_path, 1)
            img_L = cv2.resize(img, (64, 64))     # fixed 64x64 input size
            y = 100 + index                       # offset keeps mirrored copies from clashing (assumes <100 images per class)
            img_R = cv2.flip(img_L, 1, dst=None)  # horizontal mirror for augmentation
            th1 = Threshold(img_L)
            th2 = Threshold(img_R)

            img_saving_path1 = savepath + '/' + str(index) + '.jpg'
            img_saving_path2 = savepath + '/' + str(y) + '.jpg'
            cv2.imwrite(img_saving_path1, th1)
            cv2.imwrite(img_saving_path2, th2)


if __name__ == '__main__':
    path = 'D:/photo'
    save = 'D:/photos'
    process(path, save)
    print('Finished preprocessing!')
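Before moving on, it is worth confirming that every class folder was written. A minimal sketch, assuming the D:/photos output layout above:

import os

save = 'D:/photos'
for name in sorted(os.listdir(save)):
    folder = os.path.join(save, name)
    if os.path.isdir(folder):
        print(name, len(os.listdir(folder)), 'images')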

The results:

[Figures 2 and 3: samples of the thresholded output]

3. Write a get_batch function to supply data during training, and put it in a new file named Data.py (step 5 imports it under that name).

(The training and test sets are split at a 4:1 ratio.)

import numpy as np
import os
from PIL import Image


class Conversion:

    def __init__(self, batch_size, data_path, ratio=0.8):
        """
        :param batch_size: number of images per batch
        :param data_path: root folder whose subfolders (named 0-4) hold the images
        :param ratio: fraction of samples used for training (0.8 gives the 4:1 split)
        """
        self.batch_size = batch_size
        self.data_path = data_path
        self.labels = []
        self.image_names = []
        self.count1 = 0   # read cursor into the training set
        self.count2 = 0   # read cursor into the test set

        self.dir_names = os.listdir(self.data_path)
        for name in self.dir_names:
            image_path = os.path.join(self.data_path, name)
            image_names = os.listdir(image_path)
            for image1 in image_names:
                self.labels.append(int(name))   # folder name doubles as the integer class label
                path = os.path.join(image_path, image1)
                self.image_names.append(path)

        self.shuffle_set()
        s = int(len(self.labels) * ratio)   # split point for the 4:1 train/test split
        self.x_train = self.image_names[:s]
        self.y_train = self.labels[:s]

        self.x_test = self.image_names[s:]
        self.y_test = self.labels[s:]

        print("Data Loading finished!")

    def shuffle_set(self):
        # Shuffle paths and labels with the same seed so they stay aligned
        np.random.seed(12)
        np.random.shuffle(self.image_names)
        np.random.seed(12)
        np.random.shuffle(self.labels)

    def get_batch_data(self):
        # Wrap both cursors around once a full pass over the data is complete
        if self.count1 + self.batch_size >= len(self.x_train):
            self.count1 = 0
        if self.count2 + self.batch_size >= len(self.x_test):
            self.count2 = 0

        train_name_batch = self.x_train[self.count1: (self.count1 + self.batch_size)]
        train_labels = self.y_train[self.count1: (self.count1 + self.batch_size)]

        test_name_batch = self.x_test[self.count2: (self.count2 + self.batch_size)]
        test_labels = self.y_test[self.count2: (self.count2 + self.batch_size)]

        images_train = []
        images_test = []

        for images_path in train_name_batch:
            image = Image.open(images_path).convert('L')
            image = np.array(image) / 255.0
            images_train.append(image)
        datas_train = np.array(images_train)
        datas_train = datas_train.reshape((-1, 64, 64, 1))
        train_labels = np.array(train_labels)

        for images_path in test_name_batch:
            image = Image.open(images_path).convert('L')
            image = np.array(image) / 255.0
            images_test.append(image)
        datas_test = np.array(images_test)
        datas_test = datas_test.reshape((-1, 64, 64, 1))
        test_labels = np.array(test_labels)

        self.count1 = self.count1 + self.batch_size
        self.count2 = self.count2 + self.batch_size

        return datas_train, train_labels, datas_test, test_labels
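A quick usage sketch to check that the loader behaves as expected (assuming the D:/photos folder from step 2):

import Data

data = Data.Conversion(batch_size=20, data_path='D:/photos')
x_tr, y_tr, x_te, y_te = data.get_batch_data()
print(x_tr.shape, y_tr.shape)   # expected: (20, 64, 64, 1) (20,)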

4. Write the network model. The code below was found online and lightly modified; the model is LeNet-5, wrapped in a class together with the train and test functions. File name: Model.py.

import tensorflow as tf
import numpy as np


class LeNet5:
    def __init__(self, num_channels, label_num):
        self.num_channels = num_channels
        self.label_num = label_num
        self.conv1_size = 5
        self.conv1_deep = 32
        self.conv2_size = 5
        self.conv2_deep = 64
        self.fc_size = 512
        self.loss_data = []
        self.acctr = []
        self.accte = []

    def inference(self, input, train, regularizer=None):
        with tf.variable_scope('layer1-conv1'):
            conv1_w = tf.get_variable('w', [self.conv1_size, self.conv1_size, self.num_channels, self.conv1_deep],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
            conv1_b = tf.get_variable('b', [self.conv1_deep], initializer=tf.constant_initializer(0.0))
            self.conv1 = tf.nn.conv2d(input, conv1_w, strides=[1,1,1,1], padding='SAME')
            self.relu1 = tf.nn.relu(tf.nn.bias_add(self.conv1, conv1_b))
        with tf.name_scope('layer2-pool1'):
            self.pool1 = tf.nn.max_pool(self.relu1, ksize=[1, 2, 2, 1],strides=[1,2,2,1],padding='SAME')
        with tf.variable_scope('layer3-conv2'):
            conv2_w = tf.get_variable('w', [self.conv2_size, self.conv2_size, self.conv1_deep, self.conv2_deep],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
            conv2_b = tf.get_variable('b', [self.conv2_deep], initializer=tf.constant_initializer(0.0))
            self.conv2 = tf.nn.conv2d(self.pool1, conv2_w, strides=[1,1,1,1], padding='SAME')

            self.relu2 = tf.nn.relu(tf.nn.bias_add(self.conv2, conv2_b))
        # pooling layer: 16*16*64 for 64x64 inputs (64 -> 32 -> 16 after two 2x2 pools)
        with tf.name_scope('layer4-pool2'):
            self.pool2 = tf.nn.max_pool(self.relu2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
        pool_shape = self.pool2.get_shape()
        nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
        reshaped = tf.reshape(self.pool2, [-1, int(nodes)])

        with tf.variable_scope('layer5-fc1'):
            fc1_w = tf.get_variable('w', [nodes, self.fc_size],
                                    initializer=tf.truncated_normal_initializer(stddev=0.1))
            fc1_b = tf.get_variable('b', [self.fc_size], initializer=tf.constant_initializer(0.1))
            if regularizer is not None:
                tf.add_to_collection('losses', regularizer(fc1_w))
            self.fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
            if train:
                self.fc1 = tf.nn.dropout(self.fc1, 0.5)   # dropout only during training
        with tf.variable_scope('layer6-fc2'):
            fc2_w = tf.get_variable('w', [self.fc_size, self.label_num], initializer=tf.truncated_normal_initializer(stddev=0.1))
            fc2_b = tf.get_variable('b', [self.label_num], initializer=tf.constant_initializer(0.1))
            if regularizer is not None:
                tf.add_to_collection('losses', regularizer(fc2_w))
            self.logits = tf.matmul(self.fc1, fc2_w) + fc2_b
        return self.logits


    def train(self, data, steps, learning_rate, save_dir, regular_rate=0.0001, moving_decay=0.99):
        self.learning_rate = learning_rate
        self.save_dir = save_dir
        x = tf.placeholder(tf.float32, [None, 64, 64, 1], name='x-input')
        y = tf.placeholder(tf.float32, [None, 5], name='y-input')   # one-hot labels; float32 to match the logits

        regularizer = tf.contrib.layers.l2_regularizer(regular_rate)
        y_ = self.inference(x, True, regularizer)
        global_step = tf.Variable(0, trainable=False)
        variable_averages = tf.train.ExponentialMovingAverage(moving_decay, global_step)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y))
        loss = cross_entropy + tf.add_n(tf.get_collection('losses'))
        train_step = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(loss,
                                                                                                  global_step=global_step)
        correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        with tf.control_dependencies([train_step, variable_averages_op]):
            train_op = tf.no_op(name='train')
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(steps):
                x_train, y_train, x_test, y_test = data.get_batch_data()
                # One-hot encode with NumPy; calling tf.one_hot here would add new ops to the graph on every step
                train_y = np.eye(self.label_num)[y_train]
                test_y = np.eye(self.label_num)[y_test]

                _, loss_value, step, acc = sess.run([train_op, loss, global_step, accuracy],
                                                    feed_dict={x: x_train, y: train_y})
                acc_test = sess.run(accuracy, feed_dict={x: x_test, y: test_y})
                if i == 0:
                    print('Step %d: loss %g, training accuracy %.2f%%, test accuracy %.2f%%' % (
                        i, loss_value, acc * 100.0, acc_test * 100.0))
                    self.loss_data.append(loss_value)
                    self.acctr.append(acc)
                    self.accte.append(acc_test)
                if (i + 1) % 10 == 0:
                    print('After %d steps: loss %g, training accuracy %.2f%%, test accuracy %.2f%%' % (
                        i + 1, loss_value, acc * 100.0, acc_test * 100.0))
                    self.loss_data.append(loss_value)
                    self.acctr.append(acc)
                    self.accte.append(acc_test)

            saver.save(sess, self.save_dir)


    def test(self, image, model_path):
        x = tf.placeholder(tf.float32, [None, 64, 64, 1], name='x-input')
        y_ = self.inference(x, False)
        image = np.array(image) / 255.0
        image = np.reshape(image, (-1, 64, 64, 1))
        logit = tf.argmax(y_, 1)   # predicted class index
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            saver.restore(sess, model_path)
            y, label = sess.run((y_, logit), feed_dict={x:image})
        return label
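As a sanity check on the architecture, here is a small sketch that builds the graph in a fresh process and prints the tensor shapes as a 64x64 grayscale batch flows through (these shapes are what the 16*16*64 flatten above is based on):

import tensorflow as tf
import Model

net = Model.LeNet5(num_channels=1, label_num=5)
x = tf.placeholder(tf.float32, [None, 64, 64, 1])
logits = net.inference(x, train=False)
print(net.pool1.shape)   # (?, 32, 32, 32)
print(net.pool2.shape)   # (?, 16, 16, 64) -- hence the 16*16*64 flatten
print(logits.shape)      # (?, 5)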

5. Call everything from a new file, named main.py, to run training and testing.

import Process
import Model
import Data
import cv2


def train(model, batch_size, path, steps, learning_rate, save_dir):
    data = Data.Conversion(batch_size, path)
    model.train(data=data, steps=steps, learning_rate=learning_rate, save_dir=save_dir)


def train_save():
    logs_train_dir = "D:/model/model/model.ckpt"
    model = Model.LeNet5(1, 5)
    train(model, batch_size=20, path='D:/photos/', steps=500, learning_rate=0.05, save_dir=logs_train_dir)


def restore_test():
    image_path = "D:/image/test/0.jpg"
    image_data = cv2.imread(image_path, 1)
    img_size = cv2.resize(image_data, (64, 64))
    image = Process.Threshold(img_size)

    model = Model.LeNet5(1, 5)
    path = "D:/model/model/model.ckpt"
    sort = model.test(image, path)
    print("识别结果为:" + str(sort))


if __name__ == "__main__":
    #train_save()
    restore_test()

6. Run the train_save function first, then restore_test (one at a time, as in main above: both build the same variable scopes in the default graph, so they should not run in the same process). The model is saved to "D:/model/model/model.ckpt".
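model.test returns the raw class index. To print a readable label instead, you could map the index back to a name inside restore_test; the names below are hypothetical placeholders, since the source only numbers the folders 0-4:

# Hypothetical class names -- substitute the five clothing categories you actually used
CLASS_NAMES = ['t-shirt', 'trousers', 'dress', 'coat', 'shoes']
sort = model.test(image, path)
print("Prediction: " + CLASS_NAMES[int(sort[0])])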
