# Convolutional neural network; loads data from a CIFAR-100 pickle file.
import tensorflow as tf
import numpy as np
import pickle

# 1. Load the dataset (here the CIFAR-100 'test' pickle file).
with open(r'test', 'rb') as fo:
    data_dict = pickle.load(fo, encoding='bytes')  # load the pickled dict

# 2. Preprocess the data.
# b'data' is X: reshape to (?, 32, 32, 3) and scale pixel values to [0, 1].
imgArr = data_dict[b'data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1) / 255
total = imgArr.shape[0]  # total number of samples
# b'fine_labels' is y; np.eye(100) turns the 100 class ids into one-hot vectors.
Y_one_hot = np.eye(100)[data_dict[b'fine_labels']]

# Fraction of the samples used for training; the remaining 10% is held out for testing.
train_test = 0.9

# Hand-rolled next_batch: returns the next `size` samples on each call.
g_b = 0  # global cursor into the dataset
def next_batch(size):
    global g_b
    xb = imgArr[g_b:g_b + size]
    yb = Y_one_hot[g_b:g_b + size]
    g_b = g_b + size
    return xb, yb

# Hyperparameters
learning_rate = 0.001   # learning rate
training_epochs = 1     # total number of training epochs
batch_size = 100        # samples per training batch

# Placeholders
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
Y = tf.placeholder(tf.float32, [None, 100])  # one-hot labels

# Conv layer 1, input (?, 32, 32, 3)
W1 = tf.Variable(tf.random_normal([3, 3, 3, 32], stddev=0.01))  # 3x3 kernel, 3 in / 32 out channels (stddev chosen to match W2)
L1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')  # conv output (?, 32, 32, 32)
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # pooled output (?, 16, 16, 32)

# Conv layer 2, input (?, 16, 16, 32)
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))  # 3x3 kernel, 32 in / 64 out channels
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')  # conv output (?, 16, 16, 64)
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # pooled output (?, 8, 8, 64)

# Flatten for the fully connected layer.
dim = L2.get_shape()[1].value * L2.get_shape()[2].value * L2.get_shape()[3].value
L2_flat = tf.reshape(L2, [-1, dim])

# Fully connected layer (W3 uses get_variable's default Glorot uniform initializer).
W3 = tf.get_variable("W3", shape=[dim, 100])
b = tf.Variable(tf.random_normal([100]))
logits = tf.matmul(L2_flat, W3) + b

# Cost (softmax cross-entropy) and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy metric for evaluation
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Create the session and initialize all variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Training loop
print('Training started...')
for epoch in range(training_epochs):
    total_batch = int(total * train_test / batch_size)  # number of batches in the training split
    g_b = 0  # reset the batch cursor at the start of each epoch
    for i in range(total_batch):
        batch_xs, batch_ys = next_batch(batch_size)
        _ = sess.run([optimizer], feed_dict={X: batch_xs, Y: batch_ys})
print('Training finished')

# Evaluate accuracy on the held-out 10% of the samples.
print('Accuracy:', sess.run(accuracy, feed_dict={X: imgArr[int(total * train_test):],
                                                 Y: Y_one_hot[int(total * train_test):]}))
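
# --- Optional sketch (assumption, not part of the original script) ------------
# The script above trains on 90% of the 'test' pickle. If the full CIFAR-100
# 'train' pickle is available in the same directory, the same preprocessing can
# be reused to build a proper training set. The file name 'train' and its
# location are assumptions; the b'data' / b'fine_labels' keys follow the same
# CIFAR-100 format used above.
import os
if os.path.exists(r'train'):
    with open(r'train', 'rb') as f_train:
        train_dict = pickle.load(f_train, encoding='bytes')
    train_x = train_dict[b'data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1) / 255  # (?, 32, 32, 3), scaled to [0, 1]
    train_y = np.eye(100)[train_dict[b'fine_labels']]                                 # one-hot fine labels
    print('train split:', train_x.shape, train_y.shape)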