WeChat official account: 小白图像与视觉. For more technical content, follow yysilence00. Questions and suggestions are welcome via messages to the official account.
'''
Unzip the dataset
'''
#!unzip -q -o data/data1917/train_new.zip
#!unzip -q -o data/data1917/test_new.zip
'''
Load the required libraries
'''
import zipfile
import paddle
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import json
import numpy as np
import cv2
import sys
import time
import h5py
from scipy.ndimage.filters import gaussian_filter
import scipy
from matplotlib import cm as CM
from paddle.utils.plot import Ploter
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True  # tolerate truncated image files
'''
Inspect train.json, focusing on the annotation info under `annotations`
'''
f = open('/home/aistudio/data/data1917/train.json', encoding='utf-8')
content = json.load(f)  # parsed as a dict
f.close()
#print(content)
'''
Strip the leading "stage1/" from each name in content
'''
for j in range(len(content['annotations'])):
    content['annotations'][j]['name'] = content['annotations'][j]['name'].replace('stage1/', '', 1)
'''
Generate the density map with Gaussian filtering
'''
def gaussian_filter_density(gt):
    # initialize the density map
    density = np.zeros(gt.shape, dtype=np.float32)
    # count the non-zero elements (annotated heads) in gt
    gt_count = np.count_nonzero(gt)
    # if gt is all zeros, return the all-zero density map
    if gt_count == 0:
        return density
    pts = np.array(list(zip(np.nonzero(gt)[1].ravel(), np.nonzero(gt)[0].ravel())))
    for i, pt in enumerate(pts):
        pt2d = np.zeros(gt.shape, dtype=np.float32)
        pt2d[pt[1], pt[0]] = 1.
        if gt_count > 1:
            # geometry-adaptive variant: sigma = (distances[i][1]+distances[i][2]+distances[i][3])*0.1
            sigma = 25
        else:
            sigma = np.average(np.array(gt.shape)) / 2. / 2.
        density += gaussian_filter(pt2d, sigma, mode='constant')
    return density
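A quick sanity check (my own sketch, not part of the original notebook): each annotated point should contribute about one unit of mass to the density map. With mode='constant' the Gaussian mass that falls outside the borders is discarded, so the sum is only approximately the head count.
toy_gt = np.zeros((60, 80), dtype=np.float32)
toy_gt[10, 20] = 1.  # one head
toy_gt[40, 50] = 1.  # another head
# ideally 2.0; noticeably less here because sigma=25 is large
# relative to the 60x80 map and border mass is truncated
print(gaussian_filter_density(toy_gt).sum())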
'''
Image preprocessing: resize and normalize the image, and convert box
annotations to point annotations.
Returns: the resized image and the point list gt
'''
def picture_opt(img, ann):
    size_x, size_y = img.size
    train_img_size = (640, 480)
    img = img.resize(train_img_size, Image.ANTIALIAS)
    img = np.array(img)
    img = img / 255.0
    gt = []
    for b_l in range(len(ann)):
        # if the person is annotated with a box ('w' present), reduce the box to a
        # point: x is the horizontal center, y is the top edge plus 20 pixels
        if 'w' in ann[b_l].keys():
            x = (ann[b_l]['x'] + (ann[b_l]['x'] + ann[b_l]['w'])) / 2
            y = ann[b_l]['y'] + 20
            x = (x * 640 / size_x) / 8
            y = (y * 480 / size_y) / 8
            gt.append((x, y))
        else:
            x = ann[b_l]['x']
            y = ann[b_l]['y']
            x = (x * 640 / size_x) / 8
            y = (y * 480 / size_y) / 8
            gt.append((x, y))
    return img, gt
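Why the division by 8: the CNN defined below downsamples by three 2x2 max-pools (a factor of 8 per dimension), so head coordinates are mapped straight into the 80x60 output grid. A worked example of the mapping, assuming a hypothetical 1024x768 source image:
# a head at (512, 384) in a 1024x768 image, after resizing to 640x480:
x = (512 * 640 / 1024) / 8   # = 40.0 -> column on the 80-wide density grid
y = (384 * 480 / 768) / 8    # = 30.0 -> row on the 60-high density grid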
'''
Build the ground-truth density map
'''
def ground(img, gt):
    imgs = img
    x = imgs.shape[0] / 8  # height of the 1/8-scale map
    y = imgs.shape[1] / 8  # width of the 1/8-scale map
    k = np.zeros((int(x), int(y)))
    for i in range(0, len(gt)):
        # gt stores (x, y) pairs while k is indexed [row, col] = [y, x];
        # points that fall outside the map after rounding are dropped
        if int(gt[i][1]) < int(x) and int(gt[i][0]) < int(y):
            k[int(gt[i][1]), int(gt[i][0])] = 1
    k = gaussian_filter_density(k)
    return k
'''
Define the data generator
'''
def train_set():
    def inner():
        for ig_index in range(2000):  # iterate over all images
            # skip malformed samples whose annotation list has only 2 or 3 entries
            if len(content['annotations'][ig_index]['annotation']) == 2: continue
            if len(content['annotations'][ig_index]['annotation']) == 3: continue
            if content['annotations'][ig_index]['ignore_region']:  # fill any ignore region with zero pixels
                ig_list = []   # vertices of ignore region 1
                ig_list1 = []  # vertices of ignore region 2
                if len(content['annotations'][ig_index]['ignore_region']) == 1:  # an image has at most 2 ignore regions; this is the 1-region case
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]  # data of the first ignore region
                    for ig_len in range(len(ign_rge)):  # walk the polygon vertices
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])  # collect each vertex as [x, y]
                    ig_cv_img = cv2.imread(content['annotations'][ig_index]['name'])  # read the image with cv2
                    pts = np.array(ig_list, np.int32)  # convert ig_list to an ndarray, as fillPoly requires
                    cv2.fillPoly(ig_cv_img, [pts], (0, 0, 0), cv2.LINE_AA)  # zero-fill the ignore region
                    ig_img = Image.fromarray(cv2.cvtColor(ig_cv_img, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
                    ann = content['annotations'][ig_index]['annotation']  # all annotations for this image
                    ig_im, gt = picture_opt(ig_img, ann)
                    k = ground(ig_im, gt)
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
                if len(content['annotations'][ig_index]['ignore_region']) == 2:  # the 2-region case
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]
                    ign_rge1 = content['annotations'][ig_index]['ignore_region'][1]
                    for ig_len in range(len(ign_rge)):
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])
                    for ig_len1 in range(len(ign_rge1)):
                        ig_list1.append([ign_rge1[ig_len1]['x'], ign_rge1[ig_len1]['y']])
                    ig_cv_img2 = cv2.imread(content['annotations'][ig_index]['name'])
                    pts = np.array(ig_list, np.int32)
                    pts1 = np.array(ig_list1, np.int32)
                    cv2.fillPoly(ig_cv_img2, [pts], (0, 0, 0), cv2.LINE_AA)
                    cv2.fillPoly(ig_cv_img2, [pts1], (0, 0, 0), cv2.LINE_AA)
                    ig_img2 = Image.fromarray(cv2.cvtColor(ig_cv_img2, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
                    ann = content['annotations'][ig_index]['annotation']  # all annotations for this image
                    ig_im, gt = picture_opt(ig_img2, ann)
                    k = ground(ig_im, gt)
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
            else:
                img = Image.open(content['annotations'][ig_index]['name'])
                ann = content['annotations'][ig_index]['annotation']  # all annotations for this image
                im, gt = picture_opt(img, ann)
                k = ground(im, gt)
                groundtruth = np.asarray(k)
                groundtruth = groundtruth.T.astype('float32')
                im = im.transpose().astype('float32')
                yield im, groundtruth
    return inner
BATCH_SIZE = 3  # 3 images per batch
# set up the training reader
train_reader = paddle.batch(
    paddle.reader.shuffle(
        train_set(), buf_size=512),
    batch_size=BATCH_SIZE)
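To verify the reader before training, a small check of my own: pull one batch and confirm the shapes match what the network expects.
sample_batch = next(train_reader())  # a list of BATCH_SIZE (image, density) pairs
print(len(sample_batch))             # 3
print(sample_batch[0][0].shape)      # (3, 640, 480): CHW image after transpose
print(sample_batch[0][1].shape)      # (80, 60): 1/8-resolution density map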
class CNN(fluid.dygraph.Layer):
    '''
    The network
    '''
    def __init__(self):
        super(CNN, self).__init__()
        self.conv01_1 = fluid.dygraph.Conv2D(num_channels=3, num_filters=64, filter_size=3, padding=1, act="relu")
        self.pool01 = fluid.dygraph.Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv02_1 = fluid.dygraph.Conv2D(num_channels=64, num_filters=128, filter_size=3, padding=1, act="relu")
        self.pool02 = fluid.dygraph.Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv03_1 = fluid.dygraph.Conv2D(num_channels=128, num_filters=256, filter_size=3, padding=1, act="relu")
        self.pool03 = fluid.dygraph.Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv04_1 = fluid.dygraph.Conv2D(num_channels=256, num_filters=512, filter_size=3, padding=1, act="relu")
        self.conv05_1 = fluid.dygraph.Conv2D(num_channels=512, num_filters=512, filter_size=3, padding=1, act="relu")
        self.conv06 = fluid.dygraph.Conv2D(num_channels=512, num_filters=256, filter_size=3, padding=1, act='relu')
        self.conv07 = fluid.dygraph.Conv2D(num_channels=256, num_filters=128, filter_size=3, padding=1, act='relu')
        self.conv08 = fluid.dygraph.Conv2D(num_channels=128, num_filters=64, filter_size=3, padding=1, act='relu')
        self.conv09 = fluid.dygraph.Conv2D(num_channels=64, num_filters=1, filter_size=1, padding=0, act=None)
    def forward(self, inputs, label=None):
        """Forward pass"""
        out = self.conv01_1(inputs)
        out = self.pool01(out)
        out = self.conv02_1(out)
        out = self.pool02(out)
        out = self.conv03_1(out)
        out = self.pool03(out)
        out = self.conv04_1(out)
        out = self.conv05_1(out)
        out = self.conv06(out)
        out = self.conv07(out)
        out = self.conv08(out)
        out = self.conv09(out)
        return out
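A minimal shape check for the network (my own sketch, run on CPU so it does not need a GPU): three pooling layers reduce each spatial dimension by 8.
with fluid.dygraph.guard(fluid.CPUPlace()):
    net = CNN()
    dummy = fluid.dygraph.to_variable(np.zeros((1, 3, 640, 480), dtype='float32'))
    print(net(dummy).shape)  # [1, 1, 80, 60]: one density channel at 1/8 resolution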
'''
Model training
'''
with fluid.dygraph.guard(place=fluid.CUDAPlace(0)):
    cnn = CNN()
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.001, parameter_list=cnn.parameters())
    for epoch_num in range(5):
        for batch_id, data in enumerate(train_reader()):
            dy_x_data = np.array([x[0] for x in data]).astype('float32')
            y_data = np.array([x[1] for x in data]).astype('float32')
            y_data = y_data[:, np.newaxis]
            # convert the numpy arrays into the variables DyGraph expects
            img = fluid.dygraph.to_variable(dy_x_data)
            label = fluid.dygraph.to_variable(y_data)
            label.stop_gradient = True
            out = cnn(img, label)
            loss = fluid.layers.square_error_cost(out, label)
            avg_loss = fluid.layers.mean(loss)
            # backward() runs the backward pass
            avg_loss.backward()
            optimizer.minimize(avg_loss)
            # clear the gradients so the next iteration starts clean
            cnn.clear_gradients()
            dy_param_value = {}
            for param in cnn.parameters():
                dy_param_value[param.name] = param.numpy()
            if batch_id % 10 == 0:
                print("Loss at epoch {} step {}: {}".format(epoch_num, batch_id, avg_loss.numpy()))
    # save the model parameters
    fluid.save_dygraph(cnn.state_dict(), "cnn")
    print("Final loss: {}".format(avg_loss.numpy()))
Loss at epoch 0 step 0: [1.0834869]
Loss at epoch 0 step 10: [0.00012348]
Loss at epoch 0 step 20: [0.00044698]
... (loss printed every 10 steps over 5 epochs of ~640 steps each, elided) ...
Loss at epoch 4 step 630: [2.6986977e-06]
Loss at epoch 4 step 640: [0.00017034]
Final loss: [1.3935475e-05]
data_dict = {}
'''
Model prediction
'''
with fluid.dygraph.guard():
    model, _ = fluid.dygraph.load_dygraph("cnn")
    cnn = CNN()
    cnn.load_dict(model)
    cnn.eval()
    # collect the list of test images
    test_zfile = zipfile.ZipFile("/home/aistudio/data/data1917/test_new.zip")
    l_test = []
    for test_fname in test_zfile.namelist()[1:]:
        l_test.append(test_fname)
    for index in range(len(l_test)):
        test_img = Image.open(l_test[index])
        test_img = test_img.resize((640, 480))
        test_im = np.array(test_img)
        test_im = test_im / 255.0
        test_im = test_im.transpose().reshape(3, 640, 480).astype('float32')
        l_test[index] = l_test[index].split('/')[-1]  # keep only the file name as the submission id
        dy_x_data = np.array(test_im).astype('float32')
        dy_x_data = dy_x_data[np.newaxis, :, :, :]
        img = fluid.dygraph.to_variable(dy_x_data)
        out = cnn(img)
        temp = out[0][0]
        temp = temp.numpy()
        people = np.sum(temp)  # the predicted count is the integral of the density map
        data_dict[l_test[index]] = int(people)
import csv
with open('results.csv', 'w') as csvfile:
    fieldnames = ['id', 'predicted']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for k, v in data_dict.items():
        writer.writerow({'id': k, 'predicted': v})
print("Done")
Done
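The resulting results.csv pairs each test image id with its predicted count; the rows look like this (file names and counts hypothetical):
id,predicted
1.jpg,32
2.jpg,178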
For more, please refer to: