darknet source code reading (network layer code): yolov3 yolo_layer

How the yolov3 yolo_layer works, with a walkthrough of its code.

Introduction

The yolo layer is the last layer of YOLOv2/v3; it outputs x, y, w, h, confidence, and class. This post focuses on the YOLOv3 loss.

Reading the YOLOv3 loss

  • The YOLOv3 loss is made up of three parts: box, confidence, and class.
    The box loss uses MSE; the confidence and class losses use cross entropy.
    Loss function:
    $\sum_{i=1}^{all\_pre\_box} -weight \times \left[y \log(confidence_y) + (1-y)\log(1-confidence_y)\right]$
    $+ \sum_{i=1}^{maxbox} \left[-y \log(class_y) - (1-y)\log(1-class_y)\right]$
    $+ \sum_{i=1}^{maxbox} \left[\frac{(x_i-\widehat{x_i})^2 + (y_i-\widehat{y_i})^2}{2} + \frac{(w_i-\widehat{w_i})^2 + (h_i-\widehat{h_i})^2}{2}\right]$

  • Cross entropy
    Reference: https://xmfbit.github.io/2018/04/01/paper-yolov3/
    Loss function:
    $J = -\left(y \log\widehat{y} + (1-y)\log(1-\widehat{y})\right) \qquad (1)$
    $\widehat{y} = h(x) = \frac{1}{1+e^{-x}} \qquad (2)$

    $y$ is the ground truth, i.e. the annotated label;
    $\widehat{y}$ is the prediction;
    $h(x)$ is the logistic (sigmoid) activation function.

    • Differentiating the loss $J$ with respect to $x$:
      $\frac{\partial J}{\partial x} = \frac{\partial J}{\partial h(x)} \frac{\partial h(x)}{\partial x} \qquad (3)$
      $\frac{\partial J}{\partial h(x)} = -y \cdot \frac{1}{h(x)} + (1-y) \cdot \frac{1}{1-h(x)} \qquad (4)$
      $\frac{\partial h(x)}{\partial x} = (1-h(x)) \cdot h(x) \qquad (5)$
      Substituting (4) and (5) into (3) gives $-y(1-h(x)) + (1-y)h(x)$, so:
      $\frac{\partial J}{\partial x} = h(x) - y \qquad (6)$
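  • Equation (6) is easy to sanity-check numerically. The snippet below is a standalone sketch (not part of darknet) that compares the analytic gradient h(x) - y against a central finite difference of J:
#include <math.h>
#include <stdio.h>

static float logistic(float x) { return 1.f / (1.f + expf(-x)); }

/* cross-entropy loss J for a single logit x and label y, Eq. (1) + (2) */
static float xent(float x, float y)
{
    float h = logistic(x);
    return -(y * logf(h) + (1 - y) * logf(1 - h));
}

int main(void)
{
    float x = 0.3f, y = 1.0f, eps = 1e-3f;
    float analytic = logistic(x) - y;                                 /* Eq. (6) */
    float numeric  = (xent(x + eps, y) - xent(x - eps, y)) / (2 * eps);
    printf("analytic dJ/dx = %f, numeric dJ/dx = %f\n", analytic, numeric);
    return 0;
}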

Code walkthrough

  • Layer construction. w, h: width and height of the input feature map at this layer; n: number of anchors used by this feature map (3); total: total number of anchors (9); mask: indices of the anchors assigned to this layer; classes: number of classes; max_boxes: maximum number of ground-truth boxes per image.
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes, int max_boxes)
{
    int i;
    layer l = { 0 };
    l.type = YOLO;

    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + 4 + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(total*2, sizeof(float));
    if(mask) l.mask = mask;
    else{
        l.mask = calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            l.mask[i] = i;
        }
    }
    l.bias_updates = calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + 4 + 1);
    l.inputs = l.outputs;
    l.max_boxes = max_boxes;
    l.truths = l.max_boxes*(4 + 1);    // 90*(4 + 1);
    l.delta = calloc(batch*l.outputs, sizeof(float));
    l.output = calloc(batch*l.outputs, sizeof(float));
    for(i = 0; i < total*2; ++i){
        l.biases[i] = .5;
    }

    l.forward = forward_yolo_layer;
    l.backward = backward_yolo_layer;
#ifdef GPU
    l.forward_gpu = forward_yolo_layer_gpu;
    l.backward_gpu = backward_yolo_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif

    fprintf(stderr, "yolo\n");
    srand(0);

    return l;
}
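  • As a usage example (the values are illustrative, taken from a typical yolov3.cfg with a 416x416 input): the 13x13 scale uses 3 of the 9 anchors via mask = {6,7,8} and 80 COCO classes, so l.outputs = 13*13*3*(80+4+1) = 43095.
/* illustrative only: build the yolo layer for the 13x13 scale of a 416-input YOLOv3 model */
int mask[3] = {6, 7, 8};                                  /* the three largest anchors     */
layer l = make_yolo_layer(1, 13, 13, 3, 9, mask, 80, 90);
/* l.outputs == 13*13*3*(80+4+1) == 43095 floats per image;
   the cfg parser then overwrites l.biases with the anchor sizes from the cfg,
   since make_yolo_layer initializes them all to 0.5 */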
  • In GPU mode
  • Function forward_yolo_layer_gpu: forward pass of the yolo layer; it applies the activations on the GPU and then computes the gradients (via the CPU path below).
void forward_yolo_layer_gpu(const layer l, network_state state)
{
    copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
    int b, n;
    // apply the logistic activation, Eq. (2), to x, y and to confidence + class scores
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array_ongpu(l.output_gpu + index, 2*l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, 4);
            activate_array_ongpu(l.output_gpu + index, (1+l.classes)*l.w*l.h, LOGISTIC);
        }
    }

    if(!state.train || l.onlyforward){
        cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
        return;
    }

    //cuda_pull_array(l.output_gpu, state.input, l.batch*l.inputs);
    float *in_cpu = calloc(l.batch*l.inputs, sizeof(float));
    cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);

    float *truth_cpu = 0;
    if (state.truth) {
        int num_truth = l.batch*l.truths;
        truth_cpu = calloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    network_state cpu_state = state;
    cpu_state.net = state.net;
    cpu_state.index = state.index;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    // compute gradients and loss on the CPU
    forward_yolo_layer(l, cpu_state);
    //forward_yolo_layer(l, state);
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    free(in_cpu);
    if (cpu_state.truth) free(cpu_state.truth);
}
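  • Both forward passes address the output tensor through entry_index. For reference, darknet lays out each anchor's predictions as contiguous w*h planes in the order x, y, w, h, objectness, class_0 ... class_{classes-1}; entry_index (defined elsewhere in yolo_layer.c) is essentially:
// index of plane `entry` (0:x, 1:y, 2:w, 3:h, 4:objectness, 5...: classes)
// for the anchor/cell encoded in location = n*l.w*l.h + j*l.w + i
static int entry_index(layer l, int batch, int location, int entry)
{
    int n   = location / (l.w*l.h);   // anchor index within this layer
    int loc = location % (l.w*l.h);   // spatial offset j*l.w + i
    return batch*l.outputs + n*l.w*l.h*(4 + l.classes + 1) + entry*l.w*l.h + loc;
}
  • This layout is why the activate_array call starting at entry 0 covers 2*l.w*l.h values (the x and y planes), while the call starting at entry 4 covers (1+l.classes)*l.w*l.h values (objectness plus all class planes).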

  • Function forward_yolo_layer(l, cpu_state): computes the gradients and the loss.
void forward_yolo_layer(const layer l, network_state state)
{
    int i,j,b,t,n;
    memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));

#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, 4);
            activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
        }
    }
#endif
    // zero the gradients
    memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
    if(!state.train) return;
    float avg_iou = 0;   // accumulated IOU over matched boxes
    float recall = 0;    // recall at IOU > 0.5
    float recall75 = 0;
    float avg_cat = 0;
    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    int class_count = 0;
    *(l.cost) = 0; // total loss
    // The four nested loops below take each predicted box (x, y, w, h, confidence, class) in turn, compute its IOU against every ground truth, and keep the ground truth with the largest IOU.
    for (b = 0; b < l.batch; ++b) {
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0); // index of this predicted box
                    box pred = get_yolo_box(l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.w*l.h); // decode the predicted box (x, y, w, h)
                    float best_iou = 0; // best IOU so far
                    int best_t = 0;     // index of the ground truth with the highest IOU against this prediction
                    for(t = 0; t < l.max_boxes; ++t){ // find the best IOU and the corresponding ground-truth index
                        box truth = float_to_box_stride(state.truth + t*(4 + 1) + b*l.truths, 1);
                        int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
                        if (class_id >= l.classes) {
                            printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes - 1);
                            getchar();
                            continue; // if label contains class_id more than number of classes in the cfg-file
                        }
                        if(!truth.x) break;  // continue;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) {
                            best_iou = iou;
                            best_t = t;
                        }
                    }
                    int obj_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4);
                    avg_anyobj += l.output[obj_index];
                    // objectness gradient: the default target is 0 (no object)
                    l.delta[obj_index] = 0 - l.output[obj_index];
                    // if the best IOU exceeds the ignore threshold, zero out the objectness gradient
                    if (best_iou > l.ignore_thresh) {
                        l.delta[obj_index] = 0;
                    }
                    // this branch never runs in YOLOv3, because l.truth_thresh is 1
                    if (best_iou > l.truth_thresh) {
                        l.delta[obj_index] = 1 - l.output[obj_index];

                        int class_id = state.truth[best_t*(4 + 1) + b*l.truths + 4];
                        if (l.map) class_id = l.map[class_id];
                        int class_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4 + 1);
                        delta_yolo_class(l.output, l.delta, class_index, class_id, l.classes, l.w*l.h, 0, l.focal_loss);
                        box truth = float_to_box_stride(state.truth + best_t*(4 + 1) + b*l.truths, 1);
                        delta_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);
                    }
                }
            }
        }
        // Box and class gradients are computed only for the predicted box responsible for each ground truth: compute the IOU between the ground truth and every anchor, pick the anchor with the largest IOU, and if that anchor's index is in this layer's mask, compute the gradients and the loss.
        for(t = 0; t < l.max_boxes; ++t){
            box truth = float_to_box_stride(state.truth + t*(4 + 1) + b*l.truths, 1);
            int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
            if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file

            if(!truth.x) break;  // continue;
            float best_iou = 0;
            int best_n = 0;
            i = (truth.x * l.w); // grid-cell column of the ground-truth center (x)
            j = (truth.y * l.h); // grid-cell row of the ground-truth center (y)
            box truth_shift = truth;
            truth_shift.x = truth_shift.y = 0;
            for(n = 0; n < l.total; ++n){ // compute IOU against all anchors (width/height only)
                box pred = {0};
                pred.w = l.biases[2*n]/ state.net.w;
                pred.h = l.biases[2*n+1]/ state.net.h;
                float iou = box_iou(pred, truth_shift);
                if (iou > best_iou){
                    best_iou = iou;
                    best_n = n;
                }
            }
            // check whether best_n, the index of the best-IOU anchor, is in this layer's mask; int_index returns -1 if it is not
            int mask_n = int_index(l.mask, best_n, l.n);
            if(mask_n >= 0){
                int box_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 0);
                // box gradient
                float iou = delta_yolo_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);

                int obj_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 4);
                avg_obj += l.output[obj_index];
                // objectness gradient, the negative of Eq. (6) (darknet stores -dJ/dx); 1 is the ground-truth label
                l.delta[obj_index] = 1 - l.output[obj_index];
       
                int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
                if (l.map) class_id = l.map[class_id];
                int class_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 4 + 1);
                // class gradient
                delta_yolo_class(l.output, l.delta, class_index, class_id, l.classes, l.w*l.h, &avg_cat, l.focal_loss);

                ++count;
                ++class_count;
                if(iou > .5) recall += 1;
                if(iou > .75) recall75 += 1;
                avg_iou += iou;
            }
        }
    }
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region %d Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, .5R: %f, .75R: %f,  count: %d\n", state.index, avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, recall75/count, count);
}
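  • The cost reported above is simply the squared L2 norm of the delta array, i.e. the sum of squared gradients over the whole batch. mag_array is a small darknet utility that looks roughly like this:
#include <math.h>

/* L2 norm (magnitude) of an array, as used for *(l.cost) above */
float mag_array(float *a, int n)
{
    int i;
    float sum = 0;
    for (i = 0; i < n; ++i) {
        sum += a[i] * a[i];
    }
    return sqrt(sum);
}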
  • Function delta_yolo_box: computes the box gradient.
float delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride)
{
    box pred = get_yolo_box(x, biases, n, index, i, j, lw, lh, w, h, stride);
    float iou = box_iou(pred, truth); // IOU between the decoded prediction and the ground truth

    float tx = (truth.x*lw - i); // ground-truth center offset from the grid cell's top-left corner
    float ty = (truth.y*lh - j);
    float tw = log(truth.w*w / biases[2*n]); // since b_w = p_w * exp(t_w), t_w = log(b_w / p_w)
    float th = log(truth.h*h / biases[2*n + 1]);
    // derivative of the MSE term; scale = 2 - truth.w*truth.h gives small boxes more weight
    delta[index + 0*stride] = scale * (tx - x[index + 0*stride]); 
    delta[index + 1*stride] = scale * (ty - x[index + 1*stride]);
    delta[index + 2*stride] = scale * (tw - x[index + 2*stride]);
    delta[index + 3*stride] = scale * (th - x[index + 3*stride]);
    return iou;
}
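  • delta_yolo_box is the inverse of the decoding done by get_yolo_box: the network outputs t_x, t_y, t_w, t_h and get_yolo_box turns them into a box normalized to the image, essentially:
/* decode raw outputs into a normalized box; (i, j) is the grid cell, (lw, lh) the
   feature-map size, (w, h) the network input size, biases[2n], biases[2n+1] the anchor */
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j,
                 int lw, int lh, int w, int h, int stride)
{
    box b;
    b.x = (i + x[index + 0*stride]) / lw;               /* b_x = (i + sigmoid(t_x)) / lw */
    b.y = (j + x[index + 1*stride]) / lh;               /* b_y = (j + sigmoid(t_y)) / lh */
    b.w = exp(x[index + 2*stride]) * biases[2*n]   / w; /* b_w = p_w * exp(t_w) / w      */
    b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h; /* b_h = p_h * exp(t_h) / h      */
    return b;
}
  • Comparing with delta_yolo_box above, tx, ty, tw, th are exactly the values the raw outputs would have to take to reproduce the ground-truth box, so the MSE is computed in the network's output space.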
  • Function delta_yolo_class: computes the class gradient.
void delta_yolo_class(float *output, float *delta, int index, int class_id, int classes, int stride, float *avg_cat, int focal_loss)
{
    int n;
    // see Eq. (6): if a gradient has already been set for this entry, only update the target class
    if (delta[index + stride*class_id]){
        delta[index + stride*class_id] = 1 - output[index + stride*class_id];
        if(avg_cat) *avg_cat += output[index + stride*class_id];
        return;
    }
    // Focal loss
    if (focal_loss) {
        // Focal Loss
        float alpha = 0.5;    // 0.25 or 0.5
        //float gamma = 2;    // hardcoded in many places of the grad-formula

        int ti = index + stride*class_id;
        float pt = output[ti] + 0.000000000000001F;
        // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
        float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1);    // http://blog.csdn.net/linmingan/article/details/77885832
        //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1);    // https://github.com/unsky/focal-loss

        for (n = 0; n < classes; ++n) {
            delta[index + stride*n] = (((n == class_id) ? 1 : 0) - output[index + stride*n]);

            delta[index + stride*n] *= alpha*grad;

            if (n == class_id) *avg_cat += output[index + stride*n];
        }
    }
    else {
        // default
        for (n = 0; n < classes; ++n) {
            delta[index + stride*n] = ((n == class_id) ? 1 : 0) - output[index + stride*n]; // Eq. (6), negated: y - h(x)
            if (n == class_id && avg_cat) *avg_cat += output[index + stride*n];
        }
    }
}
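  • A small worked example of the default branch (hypothetical numbers): with 3 classes, ground-truth class 1 and predicted class scores {0.2, 0.7, 0.4}, the stored deltas are {0-0.2, 1-0.7, 0-0.4} = {-0.2, 0.3, -0.4}, i.e. y - h(x) per class, the negative of Eq. (6):
/* hypothetical example: 3 classes, ground-truth class 1, stride 1, index 0 */
float output[3] = {0.2f, 0.7f, 0.4f};  /* class scores after the logistic activation */
float delta[3]  = {0};
float avg_cat   = 0;
delta_yolo_class(output, delta, 0, 1, 3, 1, &avg_cat, 0);
/* delta is now {-0.2, 0.3, -0.4} and avg_cat == 0.7 (score of the correct class) */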
