Reading data in TensorFlow by combining tfrecords files with queues

I built my own dataset, but reading it back kept failing for no obvious reason. The following article helped me sort it out:
https://blog.csdn.net/julialove102123/article/details/80085871

My test file:

import os
import tensorflow as tf
from PIL import Image


def read_tfRecord(file_tfRecord):  # argument: path to the .tfrecords file
    queue = tf.train.string_input_producer([file_tfRecord])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'img_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)
        }
    )
    image = tf.decode_raw(features['img_raw'], tf.uint8)  # raw bytes back to a uint8 tensor
    image = tf.reshape(image, [50, 50, 1])                 # restore the 50x50 single-channel shape
    image = tf.cast(image, tf.float32)
    image = tf.image.per_image_standardization(image)      # per-image zero mean / unit variance
    label = tf.cast(features['label'], tf.int64)  # fix the dtype of the parsed label
    return image, label


traindata, trainlabel = read_tfRecord("./tmp_train.tfrecords")
# capacity should comfortably exceed min_after_dequeue + batch_size; batches of 100
# are drawn only after at least 1000 examples are buffered, which provides the shuffling
image_batch, label_batch = tf.train.shuffle_batch([traindata, trainlabel],
                        batch_size=100, capacity=2000, min_after_dequeue=1000)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    # start_queue_runners is what actually feeds the filename and example queues;
    # without it, sess.run() on the batch tensors blocks forever and nothing is ever read
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    train_steps = 10

    try:
        while not coord.should_stop():  # should_stop() returns True once a stop has been requested
            example, label = sess.run([image_batch, label_batch])
            print(example.shape, label)

            train_steps -= 1
            print(train_steps)
            if train_steps <= 0:
                coord.request_stop()  # ask the runner threads to stop

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # When done, ask the threads to stop.
        coord.request_stop()
        # And wait for them to actually do it.
        coord.join(threads)
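
For the reader above to work, the .tfrecords file must have been written with exactly the same feature keys and dtypes: 'img_raw' holding the raw bytes of a 50x50 single-channel uint8 image, and 'label' holding an int64. The original write script is not shown in this post, so the following is only a minimal sketch of what a matching writer could look like; the directory layout and the write_tfRecord / labels names are assumptions for illustration.

import os
import numpy as np
import tensorflow as tf
from PIL import Image


def write_tfRecord(image_dir, labels, out_file):
    # labels: hypothetical dict mapping file name -> integer class id
    writer = tf.python_io.TFRecordWriter(out_file)
    for name, label in labels.items():
        # force 50x50 grayscale so decode_raw + reshape([50, 50, 1]) works on the read side
        img = Image.open(os.path.join(image_dir, name)).convert('L').resize((50, 50))
        img_raw = np.asarray(img, dtype=np.uint8).tobytes()
        example = tf.train.Example(features=tf.train.Features(feature={
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)]))
        }))
        writer.write(example.SerializeToString())
    writer.close()

If the keys, dtypes, or image size do not match the parse_single_example spec above, the records fail to parse or reshape with an explicit error, which is worth ruling out before blaming the queue itself.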
