Using a Caffe SSD model to quickly annotate objects

This example uses an SSD model to annotate a pedestrian dataset, i.e. to extract pedestrian bounding-box coordinates; how good the extracted annotations are depends entirely on how good the model is. The classes to keep must be specified manually in the class filter inside main() (the check if item[-1] not in ['person',]), and the image file extension (.jpg) is assumed in the line that builds the output .txt filename (image_name.replace('.jpg', '.txt')).
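
A quick usage sketch (the script name ssd_auto_label.py and all paths below are placeholders; point them at your own deploy prototxt, caffemodel, labelmap, image folder and output folder):

python ssd_auto_label.py \
    --model_def models/deploy.prototxt \
    --model_weights models/ssd_pedestrian.caffemodel \
    --labelmap_file models/labelmap.prototxt \
    --image ./images \
    --txt ./labels

For every image with at least one kept detection, a .txt file with the same base name is written to the --txt directory, one line per box, e.g. "person 123 45 210 380" (label, xmin, ymin, xmax, ymax in pixels).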

#encoding=utf8
import os
import sys
import argparse,time
import numpy as np
import cv2
# Make sure that caffe is on the python path:
caffe_root = '/home/caffe-ssd'
sys.path.insert(0, os.path.join(caffe_root, 'python'))
#os.environ['GLOG_minloglevel'] = '2'  # suppress caffe's log output; must be set before "import caffe"
import caffe

from google.protobuf import text_format
from caffe.proto import caffe_pb2
from tqdm import tqdm

# read display names from labelmap.prototxt by numeric label
def get_labelname(labelmap, labels):
    num_labels = len(labelmap.item)
    labelnames = []
    if type(labels) is not list:
        labels = [labels]
    for label in labels:
        found = False
        for i in range(0, num_labels):
            if label == labelmap.item[i].label:
                found = True
                labelnames.append(labelmap.item[i].display_name)
                break
        assert found == True
    return labelnames

class CaffeDetection:
    def __init__(self, model_def, model_weights, image_resize, labelmap_file):
        #switch mode:cpu/gpu
        caffe.set_device(0)
        caffe.set_mode_gpu()
        #caffe.set_mode_cpu()

        self.image_resize = image_resize
        # Load the net in the test phase for inference, and configure input preprocessing.
        self.net = caffe.Net(model_def,      # defines the structure of the model
                             model_weights,  # contains the trained weights
                             caffe.TEST)     # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]

        # load the label map (display names for the numeric class labels, e.g. PASCAL VOC)
        with open(labelmap_file, 'r') as f:
            self.labelmap = caffe_pb2.LabelMap()
            text_format.Merge(str(f.read()), self.labelmap)
    
    def detect(self, image, conf_thresh=0.7, topn=3):
        '''
        SSD detection
        '''
        # set net to batch size of 1
        # image_resize = 300
        self.net.blobs['data'].reshape(1, 3, self.image_resize, self.image_resize)
        
        #Run the net and examine the top_k results
        image = cv2.resize(image,(self.image_resize, self.image_resize))
        #image *= 255  # if the input were scaled to [0, 1], bring it back to [0, 255]
        image1 = np.asarray(image, np.float32)
        # subtract 127.5 and scale by 0.007843 (~1/127.5), mapping pixels to roughly [-1, 1]
        # (the usual MobileNet-SSD style preprocessing)
        image1 -= 127.5
        image1 *= 0.007843
        # cv2.imread already returns BGR, so no RGB->BGR swap is needed here
        #image1 = image1[:, :, (2, 1, 0)]
        # (height, width, channels) --> (channels, height, width)
        image1 = image1.transpose(2, 0, 1)
        # record the start time for the (optional) inference timing below
        now = time.time()
        self.net.blobs['data'].data[...] = image1

        # Forward pass.
        detections = self.net.forward()['detection_out']
        #print('inference time:',time.time() - now)   #time
        # Parse the outputs.
        det_label = detections[0,0,:,1]
        det_conf = detections[0,0,:,2]
        det_xmin = detections[0,0,:,3]
        det_ymin = detections[0,0,:,4]
        det_xmax = detections[0,0,:,5]
        det_ymax = detections[0,0,:,6]

        # Keep detections with confidence >= conf_thresh (0.7 by default).
        top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]

        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_labels = get_labelname(self.labelmap, top_label_indices)
        top_xmin = det_xmin[top_indices]
        top_ymin = det_ymin[top_indices]
        top_xmax = det_xmax[top_indices]
        top_ymax = det_ymax[top_indices]

        result = []
        # return at most topn boxes; raise topn if you expect more objects per image
        for i in range(min(topn, top_conf.shape[0])):
            # box coordinates are normalized to [0, 1]; they are scaled back to
            # the original image size in main()
            xmin = top_xmin[i]
            ymin = top_ymin[i]
            xmax = top_xmax[i]
            ymax = top_ymax[i]
            score = top_conf[i]
            label = int(top_label_indices[i])
            label_name = top_labels[i]
            result.append([xmin, ymin, xmax, ymax, label, score, label_name])
        return result

def main(args):
    '''main '''
    detection = CaffeDetection(args.model_def, args.model_weights,
                               args.image_resize, args.labelmap_file)
    # traverse the image folder args.image
    imagefile_list = os.listdir(args.image)
    for image_name in tqdm(imagefile_list):
        #print(image_name)
        # build the path of each image
        image_path = os.path.join(args.image, image_name)
        frame = cv2.imread(image_path)
        # skip files that OpenCV cannot read (non-images, corrupt files)
        if frame is None:
            continue
        # detect objects in the input image
        result = detection.detect(frame, conf_thresh=0.7)
        # original image height and width, used to scale the normalized boxes
        height = frame.shape[0]
        width = frame.shape[1]
        #continue if no result is found
        if len(result) == 0:
            continue
        # create an annotation file for each image; the .jpg extension is assumed here,
        # adjust the replace() call if your images use another format
        fout = open(os.path.join(args.txt, image_name.replace('.jpg', '.txt')), 'w')
        for item in result:
            # keep only the classes you want to annotate; edit this list as needed
            if item[-1] not in ['person',]:
                continue

            # the detector returns normalized coordinates, so scale them back to the original image size
            xmin = int(round(item[0] * width))   # x-coordinate of the top-left corner
            ymin = int(round(item[1] * height))  # y-coordinate of the top-left corner
            xmax = int(round(item[2] * width))   # x-coordinate of the bottom-right corner
            ymax = int(round(item[3] * height))  # y-coordinate of the bottom-right corner
            # write one line per box: <label> <xmin> <ymin> <xmax> <ymax>
            fout.write("%s %d %d %d %d\n" % (item[-1], xmin, ymin, xmax, ymax))
        fout.close()


def parse_args():
    '''parse args'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--labelmap_file', '-lf',
                        default=None, help='path to labelmap.prototxt')
    parser.add_argument('--model_def', '-md',
                        default=None, help='path to the deploy .prototxt')
    parser.add_argument('--image_resize', '-ir', default=300, type=int)
    parser.add_argument('--model_weights', '-mw',
                        default=None, help='path to the trained .caffemodel')
    parser.add_argument('--image', '-im',
                        default=None, help='directory of images to annotate')
    parser.add_argument('--txt', '-t', type=str,
                        default=None, help='directory to save the .txt annotation files')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    # exit if any required path argument is missing
    if (args.labelmap_file is None or args.model_def is None or
            args.model_weights is None or args.image is None or args.txt is None):
        print('please specify --labelmap_file, --model_def, --model_weights, --image and --txt')
        sys.exit(1)
    main(args)
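
Because the quality of the auto-generated labels depends on the model, it is worth spot-checking a few results. A minimal sketch, assuming the hypothetical paths images/000001.jpg and labels/000001.txt and the output format written above:

#encoding=utf8
import cv2

image_path = 'images/000001.jpg'   # hypothetical example image
txt_path = 'labels/000001.txt'     # the .txt written by the script above

img = cv2.imread(image_path)
with open(txt_path) as f:
    for line in f:
        # each line has the form "<label> <xmin> <ymin> <xmax> <ymax>"
        label, xmin, ymin, xmax, ymax = line.split()
        xmin, ymin, xmax, ymax = int(xmin), int(ymin), int(xmax), int(ymax)
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        cv2.putText(img, label, (xmin, max(ymin - 5, 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.imwrite('check_000001.jpg', img)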
