Error raised by enumerate(dataloader) in the for loop inside the train function
Traceback (most recent call last):
File "G:/行人重识别学习/try/train_class.py", line 245, in <module>
main()
File "G:/行人重识别学习/try/train_class.py", line 228, in main
train(epoch, model, criterion_class, optimizer, trainloader, use_gpu)
File "G:/行人重识别学习/try/train_class.py", line 232, in train
for batch_idx, (imgs, pids, _) in enumerate(trainloader):
File "G:\anaconda1\lib\site-packages\torch\utils\data\dataloader.py", line 345, in __next__
data = self._next_data()
File "G:\anaconda1\lib\site-packages\torch\utils\data\dataloader.py", line 384, in _next_data
index = self._next_index() # may raise StopIteration
File "G:\anaconda1\lib\site-packages\torch\utils\data\dataloader.py", line 339, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "G:\anaconda1\lib\site-packages\torch\utils\data\sampler.py", line 200, in __iter__
for idx in self.sampler:
File "G:\anaconda1\lib\site-packages\torch\utils\data\sampler.py", line 62, in __iter__
return iter(range(len(self.data_source)))
TypeError: 'list' object cannot be interpreted as an integer
Cause of the error: unlike ordinary deep-learning tasks, person re-identification requires building your own dataset structure, and the Dataset class has three key methods: __init__, __len__, and __getitem__.
While overriding the dataset I made a trivial mistake: the return value of __len__ was missing the len() call.
# The three methods a Dataset class must implement: __init__, __len__, __getitem__
class ImageDataset(Dataset):
    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        # return self.dataset      # the bug: forgot to wrap the return value in len()
        return len(self.dataset)   # correct: return the number of samples

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        img = read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid
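Below is a minimal sketch of where __len__ actually matters: the DataLoader's default sampler builds its indices via range(len(dataset)), which is exactly the call that raised the TypeError above when a list came back instead of an int. The dummy_set list and its contents are placeholders invented for this example.

from torch.utils.data import DataLoader

dummy_set = [("path/to/img_%d.jpg" % i, i % 5, 0) for i in range(16)]  # (img_path, pid, camid) tuples
dataset = ImageDataset(dummy_set)
print(len(dataset))                       # 16 once __len__ returns len(self.dataset)
loader = DataLoader(dataset, batch_size=4, shuffle=True)
# for imgs, pids, camids in loader:       # iterating would call __getitem__ and try to read real images
#     ...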
A missing pair of parentheses made one argument occupy another parameter's position:
transform_test = T.Compose([
    # T.Resize(args.height, args.width),   # wrong: width ends up in the interpolation slot
    T.Resize((args.height, args.width)),   # correct: the size is a single (height, width) tuple
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
Traceback (most recent call last):
File "G:/行人重识别学习/try/train_class.py", line 305, in <module>
main()
File "G:/行人重识别学习/try/train_class.py", line 227, in main
test(model, queryloader, galleryloader, use_gpu)
File "G:/行人重识别学习/try/train_class.py", line 88, in test
for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
File "G:\anaconda1\lib\site-packages\torch\utils\data\dataloader.py", line 345, in __next__
data = self._next_data()
File "G:\anaconda1\lib\site-packages\torch\utils\data\dataloader.py", line 385, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "G:\anaconda1\lib\site-packages\torch\utils\data\_utils\fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "G:\anaconda1\lib\site-packages\torch\utils\data\_utils\fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "G:\行人重识别学习\try\data_loader.py", line 40, in __getitem__
img = self.transform(img)
File "G:\anaconda1\lib\site-packages\torchvision\transforms\transforms.py", line 70, in __call__
img = t(img)
File "G:\anaconda1\lib\site-packages\torchvision\transforms\transforms.py", line 207, in __call__
return F.resize(img, self.size, self.interpolation)
File "G:\anaconda1\lib\site-packages\torchvision\transforms\functional.py", line 250, in resize
return img.resize((ow, oh), interpolation)
File "G:\anaconda1\lib\site-packages\PIL\Image.py", line 1742, in resize
raise ValueError("unknown resampling filter")
ValueError: unknown resampling filter
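Below is a minimal sketch of why the missing parentheses lead to "unknown resampling filter", assuming the older torchvision from the traceback above, where the signature is Resize(size, interpolation=Image.BILINEAR): with two positional arguments the width lands in the interpolation slot, and PIL rejects it as a resampling filter. The image here is a dummy created only for illustration.

import torchvision.transforms as T
from PIL import Image

img = Image.new("RGB", (64, 128))       # dummy image, size given as (width, height)

bad = T.Resize(256, 128)                # 128 is silently taken as the interpolation argument
# bad(img)                              # -> ValueError: unknown resampling filter

good = T.Resize((256, 128))             # size passed as a single (height, width) tuple
print(good(img).size)                   # (128, 256): resized as intended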
p_local_features = local_features[p_inds]  # N * H * C -- this indexing raised the error
# IndexError: tensors used as indices must be long, byte or bool tensors
# A tensor used as an index must be a long, byte or bool tensor;
# p_inds is a FloatTensor, so convert it with .long() before indexing:
p_inds, n_inds = p_inds.long(), n_inds.long()
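A minimal sketch of the indexing rule with made-up shapes: local_features is N * H * C and p_inds holds one index per sample but comes back as floats, so using it directly raises the IndexError quoted above.

import torch

local_features = torch.randn(4, 8, 128)        # N * H * C
p_inds = torch.tensor([1.0, 3.0, 0.0, 2.0])    # float indices, e.g. produced by a distance/argmax step
# local_features[p_inds]                       # IndexError: tensors used as indices must be long, byte or bool tensors
p_local_features = local_features[p_inds.long()]
print(p_local_features.shape)                  # torch.Size([4, 8, 128])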
d_loss_fake = criterion(fake_out, fake_label)              # before the fix
d_loss_fake = criterion(fake_out.squeeze(-1), fake_label)  # after the fix
This error shows up when computing the loss: the prediction's batch dimension differs from the target's, so one of them has to be squeezed; which one depends on the shapes reported in the warning below.
D:\ProgramData\Anaconda3\envs\pytorch\lib\site-packages\torch\nn\modules\loss.py:443: UserWarning: Using a target size (torch.Size([10])) that is different to the input size (torch.Size([1, 10])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
return F.mse_loss(input, target, reduction=self.reduction)
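A minimal sketch of the broadcasting warning with made-up tensors: the prediction carries an extra size-1 dimension, so MSELoss only makes the shapes match by broadcasting and warns about it; squeezing the size-1 dimension aligns them. Which dimension to squeeze (0 or -1) depends on the shapes printed in the warning.

import torch
import torch.nn as nn

criterion = nn.MSELoss()
fake_out = torch.rand(10, 1)          # prediction with a trailing size-1 dimension
fake_label = torch.zeros(10)          # target of shape (10,)

# criterion(fake_out, fake_label)     # UserWarning: target size differs from input size
loss = criterion(fake_out.squeeze(-1), fake_label)  # both shapes are now (10,)
print(loss.item())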