Semantic segmentation recognizes and understands what is in an image at the level of individual pixels: both the annotations and the predictions for its semantic regions are pixel-level.
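As a quick illustration (a hedged sketch; these tensors are made up and not part of the code below), an image classification label is a single class index per image, while a semantic segmentation label holds one class index per pixel:
import torch
image = torch.zeros(3, 320, 480)                      # an RGB image: (channels, height, width)
cls_label = torch.tensor(1)                           # classification: one class index for the whole image
seg_label = torch.zeros(320, 480, dtype=torch.long)   # segmentation: one class index per pixel
print(image.shape, cls_label.shape, seg_label.shape)
# torch.Size([3, 320, 480]) torch.Size([]) torch.Size([320, 480])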
# Utility functions:
def read_voc_images(voc_dir, is_train=True):
    """Read all VOC feature images and their labels"""
def voc_colormap2label():
    """Build the mapping from RGB values to VOC class indices (rgb -> class, one-to-one)"""
def voc_label_indices(colormap, colormap2label):
    """Map the RGB values in a VOC label image to class indices (RGB color map -> class-index map)"""
def voc_rand_crop(feature, label, height, width):
    """Randomly crop both the feature and the label image to a fixed size"""
class VOCSegDataset(torch.utils.data.Dataset):
    """A custom dataset for loading the VOC dataset"""
# Combine the functions and class above into a single function
def load_data_voc(batch_size, crop_size):

Purpose: read all VOC images and their labels for the training/validation set
Parameters: voc_dir, is_train
Returns: features, labels
import os
import torch
import torchvision
from d2l import torch as d2l
voc_dir = os.path.join('VOCdevkit', 'VOC2012')
def read_voc_images(voc_dir, is_train=True):
    """Read all VOC feature images and their labels"""
    # Path of the text file listing the image names
    txt_fname = os.path.join(voc_dir, 'ImageSets', 'Segmentation',
                             'train.txt' if is_train else 'val.txt')
    # Read label images in RGB mode
    mode = torchvision.io.image.ImageReadMode.RGB
    # Read the names of all images
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    features, labels = [], []
    # Store images and labels in matching order
    for i, fname in enumerate(images):
        features.append(
            torchvision.io.read_image(
                os.path.join(voc_dir, 'JPEGImages', f'{fname}.jpg')
            )
        )
        labels.append(
            torchvision.io.read_image(
                os.path.join(voc_dir, 'SegmentationClass', f'{fname}.png'), mode
            )
        )
    return features, labels
Test it:
# Get the training images and labels
train_features, train_labels = read_voc_images(voc_dir, True)
# Plot the first five input images and their labels
n = 5
imgs = train_features[0:n] + train_labels[0:n]
imgs = [img.permute(1, 2, 0) for img in imgs]
d2l.show_images(imgs, 2, n)
Purpose: build the mapping from RGB values to VOC class indices (rgb -> class, one-to-one)
Returns: a 1-D tensor of length 256*256*256 (one entry per possible RGB value)
Parameters: none; it uses the two global constants below
# Enumerate the RGB color values and class names
#@save
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
#@save
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
#@save
def voc_colormap2label():
    """Build the mapping from RGB values to VOC class indices (rgb -> class, one-to-one)"""
    colormap2label = torch.zeros(256 ** 3, dtype=torch.long)
    for i, colormap in enumerate(VOC_COLORMAP):
        # Hash each RGB triple to a unique integer
        colormap2label[(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i
    return colormap2label
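The "hash" above packs each RGB triple into a unique integer in [0, 256^3), which indexes the lookup tensor. A small sanity check (a sketch assuming the VOC_COLORMAP/VOC_CLASSES constants defined above):
colormap2label = voc_colormap2label()
r, g, b = VOC_COLORMAP[1]                        # [128, 0, 0] is the color of 'aeroplane'
key = (r * 256 + g) * 256 + b                    # (128*256 + 0)*256 + 0 = 8388608
print(key, colormap2label[key].item(), VOC_CLASSES[colormap2label[key].item()])
# expected: 8388608 1 aeroplane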
Purpose: map the RGB values in a VOC label image to class indices (RGB color map -> class-index map)
Parameters: label, colormap2label (the RGB label image and the lookup table above)
Returns: colormap2label[idx] (the class-index map corresponding to the RGB label image)
#@save
def voc_label_indices(label, colormap2label):
    """Map the RGB values in a VOC label image to class indices (RGB color map -> class-index map)"""
    labelcolormap = label.permute(1, 2, 0).numpy().astype('int32')
    idx = ((labelcolormap[:, :, 0] * 256 + labelcolormap[:, :, 1]) * 256
           + labelcolormap[:, :, 2])
    return colormap2label[idx]
Test it:
# In the first image, the class index of the aeroplane's nose region is 1, while the background index is 0.
y = voc_label_indices(train_labels[0], voc_colormap2label())
print(y[105:115, 130:140], VOC_CLASSES[1])
tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]) aeroplane
Purpose: randomly crop both the feature and the label image to a fixed size
Parameters: feature, label, height, width
Returns: feature, label
def voc_rand_crop(feature, label, height, width):
    """Randomly crop both the feature and the label image to a fixed size"""
    # Sample one crop rectangle and apply it to both the feature and the label
    rect = torchvision.transforms.RandomCrop.get_params(
        feature, (height, width))
    feature = torchvision.transforms.functional.crop(feature, *rect)
    label = torchvision.transforms.functional.crop(label, *rect)
    return feature, label
Check random crops of the first image:
# Check random crops of the first image
imgs = []
for _ in range(n):
    imgs += voc_rand_crop(train_features[0], train_labels[0], 200, 300)
imgs = [img.permute(1, 2, 0) for img in imgs]
d2l.show_images(imgs[::2] + imgs[1::2], 2, n)
Purpose: a custom dataset for loading the VOC dataset
Initialization: define the normalization transform, call read_voc_images to read all VOC images and labels, normalize the images, drop images smaller than the crop size, and finally call voc_colormap2label() to build the RGB-to-class mapping.
Note: the data iterator only works because __getitem__ is defined, i.e. we can access the input image at any index idx together with the class index of each of its pixels.
#@save
# Inherit from the Dataset class provided by the high-level API
# For segmentation, resizing is a poor fit because interpolating a label image produces ambiguous values; cropping avoids this (see the sketch after the class below).
class VOCSegDataset(torch.utils.data.Dataset):
    """A custom dataset for loading the VOC dataset"""
    def __init__(self, is_train, crop_size, voc_dir):
        # Normalization transform (ImageNet statistics)
        self.transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # Common crop size
        self.crop_size = crop_size
        # Read all VOC images and their labels
        features, labels = read_voc_images(voc_dir, is_train=is_train)
        # Normalize the images and drop those smaller than the crop size
        self.features = [self.normalize_image(feature)
                         for feature in self.filter(features)]
        self.labels = self.filter(labels)
        # rgb -> class
        self.colormap2label = voc_colormap2label()
        print('read ' + str(len(self.features)) + ' examples')

    # Normalization
    def normalize_image(self, img):
        return self.transform(img.float() / 255)

    # Drop images smaller than the crop size
    def filter(self, imgs):
        return [img for img in imgs if (
            img.shape[1] >= self.crop_size[0] and
            img.shape[2] >= self.crop_size[1])]

    # Access the input image at index idx together with the class index of each of its pixels
    def __getitem__(self, idx):
        feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
                                       *self.crop_size)
        return (feature, voc_label_indices(label, self.colormap2label))

    def __len__(self):
        return len(self.features)
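To make the resize-vs-crop remark above concrete, here is a minimal sketch (assuming train_labels from the earlier test is still in scope and a torchvision version that provides InterpolationMode; color_set is an ad-hoc helper, not part of the notes). Bilinear resizing blends neighboring label colors into RGB values that never appear in VOC_COLORMAP, so they have no class index, while cropping only moves pixels and cannot create new colors.
import torchvision.transforms.functional as TF

def color_set(img):
    """Set of distinct RGB triples appearing in a (3, H, W) uint8 image."""
    return {tuple(px.tolist()) for px in img.permute(1, 2, 0).reshape(-1, 3)}

label = train_labels[0]
resized = TF.resize(label.float(), [200, 300],
                    interpolation=torchvision.transforms.InterpolationMode.BILINEAR)
resized = resized.round().to(torch.uint8)
cropped = TF.crop(label, 0, 0, 200, 300)

orig = color_set(label)
print(len(color_set(resized) - orig))   # usually > 0: interpolation invents colors absent from the label
print(len(color_set(cropped) - orig))   # 0: cropping never creates new colors
Any of those invented colors would silently fall back to class 0 in the colormap2label lookup, which is why VOCSegDataset crops instead of resizing.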
Purpose: combine everything above into a single function
Parameters: batch_size, crop_size
Returns: the iterators train_iter, test_iter
# Combine everything into a single function
def load_data_voc(batch_size, crop_size):
    """Load the VOC semantic segmentation dataset."""
    # num_workers = d2l.get_dataloader_workers()
    train_iter = torch.utils.data.DataLoader(
        VOCSegDataset(True, crop_size, voc_dir), batch_size, shuffle=True,
        drop_last=True)  # , num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(
        VOCSegDataset(False, crop_size, voc_dir), batch_size,
        drop_last=True)  # , num_workers=num_workers)
    return train_iter, test_iter
Test:
Set a common crop size and load the training and test iterators.
crop_size = (320, 480)
train_iter, test_iter = load_data_voc(64, crop_size)
for X, Y in train_iter:
    print(X.shape)
    print(Y.shape)
    break
read 1114 examples
read 1078 examples
torch.Size([64, 3, 320, 480])
torch.Size([64, 320, 480])
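A hedged sketch (not from the original notes) of where these shapes go next: the (batch, 320, 480) class-index map Y is exactly the target format that nn.CrossEntropyLoss expects for per-pixel classification, paired with network logits of shape (batch, num_classes, 320, 480); random logits stand in for a real segmentation model here.
from torch import nn

num_classes = len(VOC_CLASSES)                     # 21 classes, background included
logits = torch.randn(64, num_classes, 320, 480)    # fake network output: (batch, classes, H, W)
loss_fn = nn.CrossEntropyLoss()                    # averages over every pixel in the batch
print(loss_fn(logits, Y))                          # Y from the loop above: (64, 320, 480), dtype long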