4,499
Community members




# TF-Example parsing spec: maps each serialized feature key in the
# TFRecord to a fixed-length feature description. Scalar features
# (shape []) with defaults so missing keys decode cleanly.
keys_to_features = {
    'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
    'image/format': tf.FixedLenFeature([], dtype=tf.string, default_value='jpeg'),
    'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
    'image/height': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
    'image/width': tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
}
# Handlers that turn the parsed features above into decoded tensors:
# 'image' is JPEG/PNG-decoded to an HxWx3 tensor, the rest are scalars.
_scalar = slim.tfexample_decoder.Tensor
items_to_handlers = {
    'image': slim.tfexample_decoder.Image(image_key='image/encoded',
                                          format_key='image/format',
                                          channels=3),
    'label': _scalar('image/class/label', shape=[]),
    'height': _scalar('image/height', shape=[]),
    'width': _scalar('image/width', shape=[]),
}
# Decoder that applies the feature spec + handlers to each serialized
# tf.Example record.
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
# No label-index -> human-readable-name mapping is provided.
labels_to_names = None
# Human-readable descriptions of each decoded item.
# Fix: height/width were described as "float number" but are declared
# tf.int64 above; the label range was hard-coded to 0-9 although
# num_classes is variable.
items_to_descriptions = {
    'image': 'An image with shape image_shape.',
    'label': 'A single integer class label in [0, num_classes).',
    'height': 'Image height in pixels (int64).',
    'width': 'Image width in pixels (int64).'}
# Dataset definition tying the TFRecord source to the decoder above.
# NOTE(review): num_samples is hard-coded to 1000 — confirm it matches
# the actual record count in tfrecord_path.
dataset = slim.dataset.Dataset(
    data_sources=tfrecord_path,
    reader=tf.TFRecordReader,
    decoder=decoder,
    num_samples=1000,
    # Fix: the descriptions dict built above was never used; the
    # original passed None here.
    items_to_descriptions=items_to_descriptions,
    num_classes=num_classes,
)
# Parallel-reader provider over the dataset. shuffle=False makes the
# produced batches identical on every run.
provider = slim.dataset_data_provider.DatasetDataProvider(dataset=dataset,
                                                          num_readers=4,
                                                          shuffle=False,
                                                          common_queue_capacity=256,
                                                          common_queue_min=128,
                                                          seed=None)
[image, label, height, width] = provider.get(['image', 'label', 'width', 'height'][:2] + ['height', 'width']) if False else provider.get(['image', 'label', 'height', 'width'])
# resize_bilinear expects a 4-D batch, so wrap the single image, resize,
# then strip only the batch dimension.
# Fix: the original used tf.squeeze() with no axis, which would also
# (wrongly) remove any H/W/channel dimension that happened to be 1;
# restrict the squeeze to the batch axis.
resized_image = tf.squeeze(
    tf.image.resize_bilinear([image], size=[resize_height, resize_width]),
    axis=[0])
# Assemble mini-batches; allow_smaller_final_batch lets the last batch
# be short instead of dropping the remainder.
images, labels = tf.train.batch([resized_image, label],
                                batch_size=bsize,
                                allow_smaller_final_batch=True,
                                num_threads=1,
                                capacity=5 * bsize)