数据包的发送流程——基于RTL8139网卡(1)

 

 

 

 int dev_queue_xmit(strcut sk_buff *skb){
        struct net_device *dev = skb->dev;
        struct Qdisc *q;
        int rc = -ENOMEM;
       
        if (netif_needs_gso(dev,skb))
                     goto gso;         //
如果是gso数据包 则直接跳过下面几步 从而进行优化

       
        if (skb_shinfo(skb)->frag_list && !(dev->features & NETIF_F_FRAGLIST) && __skb_linearize(skb))    //
如果发送的数据包是分片 但网卡不支持skb的碎片列表,则需要调用函数__skb_linearize把这些碎片重组到一个完整的skb
                     goto out_kfree_skb;     
        if (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || illegal_highdma(dev,skb)) && __skb_linearize(skb))   //
如果要发送的数据包使用了分散/聚合i/o 但网卡不支持或分片中至少有一个在高端内存中,并且网卡不支持dma,则同样需要调用函数__skb_linearize进行线性化处理
                     goto out_kfree_skb;  
       
        if(skb->ip_summed == CHECKSUM_PARTIAL){
                     skb_set_transport_header(skb,skb->csum_start-skb_headroom(skb));
                     if (!dev_can_checksum(dev,skb) && skb_checksum_help(skb))
                             goto out_kfree_skb;       //
如果网卡不支持硬件checksum,则在这里用软件实现对数据包的
checksum
                    //
若上述几步出现错误时直接跳转到out_kfree_skb 释放
skb
        }
gso:  
     spin_lock_prefetch(&dev->queue_lock);     //
将锁变量queue_lock取到一级cache,此处还尚未加锁,为了避免下面加锁的同时发生cache失效而导致可能的加锁失败

        rcu_read_lock_bh();     //
暂停本地软中断
        q = rcu_dereference(dev->qdisc);     //
通过rcu操作保护链表 在读临界区时取出一个rcu保护指针 通过延迟写来提高同步性能
        ....
        if(q->enqueue){   //
有队列设备
                spin_lock(&dev->queue_lock);      //
获得queue_lock         
                q = dev->qdisc;
                if (q->enqueue){
                        ....       
                        rc = q->enqueue(skb,q);    //
skb放入队列

                        qdisc_run(dev);                  //qos
函数
                        spin_unlock(&dev->queue_lock);       //
释放锁
                       
                        rc = rc == NET_XMIT_BYPASS?NET_XMIT_SUCCESS :rc;
                        goto out;
                }
               spin_unlock(&dev->queue_lock);
        }
        //
无队列设备    如回环设备 虚拟设备   直接调用dev_hard_start_xmit() 不走qos
        if (dev->flags & IFF_UP){
                int cpu = smp_processor_id();
                if(dev->xmit_lock_owner != cpu){
                        HARD_TX_LOCK(dev,cpu);
                        if(!netif_queue_stopped(dev) && !netif_subqueue_stopped(dev,skb)){
                                rc = 0;
                                if (!dev_hard_start_xmit(skb,dev)) {     
                                         HARD_TX_LOCK(dev);
                                         goto out;
                                 }                  
                        }
                         HARD_TX_UNLOCK(dev);
                         if(net_ratelimit())
                                    printk(...);
                }else {
                        ....
                }
        }
        rc = - ENETDOWN;
        rcu_read_unlock_bh();
  
out_kfree_skb:
        kfree_skb(skb);       
        return rc;
out:
        rcu_read_unlock_bh();
        return rc;    
}

/*
 * __qdisc_run - drain packets from the qdisc until it is empty, the
 * driver pushes back, or we have held the CPU for too long.
 */
void __qdisc_run(struct Qdisc *q)

{

       unsigned long start_time = jiffies;

 

       while (qdisc_restart(q)) {

/* qdisc_restart() returns non-zero only while packets remain queued;
 * it returns 0 both when the queue drains and on error, breaking the
 * loop.  On the error path it has already called __netif_schedule()
 * to arrange a later retry before returning 0. */

              /*

               * Postpone processing if

               * 1. another process needs the CPU; don't hog the CPU

               * 2. we've been doing it for too long.

               */

              if (need_resched() || jiffies != start_time) {

                             /* Even though packets may remain, stop now and
                              * reschedule: __netif_schedule() raises another
                              * TX softirq so transmission resumes later. */

                      __netif_schedule(q);

                      break;

              }

       }

           /* Clear the queue-running flag (typo in original note: "清楚"
            * should be "清除", i.e. "clear") so another CPU may run it. */

       clear_bit(__QDISC_STATE_RUNNING, &q->state);

}

 

/*
 * netif_schedule - schedule the device for transmission unless its
 * queue has been stopped (__LINK_STATE_XOFF set by the driver).
 */
static inline void netif_schedule(struct net_device *dev)
{
        if (test_bit(__LINK_STATE_XOFF, &dev->state))
                return;         /* queue stopped: nothing to schedule */

        __netif_schedule(dev);
}

 

/*
 * __netif_schedule - link the device into this CPU's
 * softnet_data.output_queue (meaning it still has packets to send, or
 * skbs that can be freed) and raise NET_TX_SOFTIRQ, deferring the work
 * to the bottom half, net_tx_action().
 */
void __netif_schedule(struct net_device *dev){
        /* Only queue the device once: skip if __LINK_STATE_SCHED was
         * already set (test_and_set_bit returns the old value). */
        if(!test_and_set_bit(__LINK_STATE_SCHED,&dev->state)){
                unsigned long flags;
                struct softnet_data *sd;
                /* IRQs off while touching the per-CPU output queue. */
                local_irq_save(flags);
                sd=&__get_cpu_var(softnet_data);
                /* Push the device onto the head of the per-CPU list. */
                dev->next_sched = sd->output_queue;
                sd->output_queue=dev;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags); 
        }
}

 

static inline int qdisc_restart(struct net_device *dev){
        struct Qdisc *q = dev->qdisc;
        struct sk_buff *skb;
        int ret = NETDEV_TX_BUSY;
        if (unlikely ((skb=dev_dequeue_skb(dev,q)) == NULL))      //
依据队列出队策略即qdequeue方法,从队列中取出一个skb, .
                  return 0;
        spin_unlock(&dev->queue_lock);
       
        HARD_TX_LOCK(dev,smp_processor_id());
        if(!netif_subqueue_stopped(dev,skb))
                   ret = dev_head_start_xmit(skb,dev);       //
将取出的skb交给设备驱动层的发送函数作进一步处理,如果发送失败返回非0
        HARD_TX_UNLOCK(dev);
       
        spin_lock(&dev->queue_lcok);
        q=dev->qdisc;
       
        switch (ret){
            case NETDEV_TX_OK:
                      ret = qdisc_qlen(q);                            //
队列剩下的长度
                      break;
            case NETDEV_TX_LOCKED:          //
未得到锁 说明存在cpu争用 调用handle_dev_cpu_collision()处理争用情况
                      ret = handle_dev_cpu_collision(skb,dev,q);
                      break;
            default:
                      if(unlikely(ret != NETDEV_TX_BUSY && net_ratellimit()))
                             ...
                      ret = dev_requeue_skb(skb,dev,q);
                      break;     

// 上面2种情况,函数都返回0
         }
         return ret;
}

 

static inline int handle_dev_cpu_collision(struct sk_buff *skb,struct net_device *dev,struct Qdisc *q){
        int ret;
        if(unlikely(dev->xmit_lock_owner == smp_processor_id())){  //
判断如果获得锁的cpu就是当前cpu,则该skb可能是在发送过程中出的错, 故直接将其drop. 否则就是其他cpu占用了该锁正在发送其他数据包 此时说明存在拥塞了 将当前的skb再放回queue
                kfree_skb(skb);
                if(net_ratelimit())  //
内核调试信息打印.
                          printk(...);  .
                ret = qdisc_qlen(q);
        }else{           //
                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                ret = dev_requeue_skb(skb,dev,q);
        }
        return ret;
}

 

/*
 * dev_hard_start_xmit - hand one skb (or a GSO segment chain) to the
 * driver's hard_start_xmit routine.
 * Returns the driver's return code, NETDEV_TX_BUSY if the tx queue
 * stopped mid-chain, or 0 once a fully-sent GSO chain is freed.
 */
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,

                     struct netdev_queue *txq)

{

       if (likely(!skb->next)) {

              /* Deliver a copy to all-protocol taps (registered hooks,
               * e.g. packet sockets) before transmitting. */
              if (!list_empty(&ptype_all))

                     dev_queue_xmit_nit(skb, dev);

 

              if (netif_needs_gso(dev, skb)) {

                     if (unlikely(dev_gso_segment(skb)))

                            goto out_kfree_skb;

                     /* Segmentation produced a chain: send it below. */
                     if (skb->next)

                            goto gso;

              }

 

              /* Single skb: hand it straight to the driver. */
              return dev->hard_start_xmit(skb, dev);

       }

 

gso:

       /* Walk the chain of GSO segments, unlinking and transmitting
        * them one at a time. */
       do {

              struct sk_buff *nskb = skb->next;

              int rc;

 

              skb->next = nskb->next;

              nskb->next = NULL;

              rc = dev->hard_start_xmit(nskb, dev);

              if (unlikely(rc)) {

                     /* Driver refused this segment: relink it at the
                      * head of the chain and report the error. */
                     nskb->next = skb->next;

                     skb->next = nskb;

                     return rc;

              }

              /* Queue stopped with segments still pending: bail out. */
              if (unlikely(netif_tx_queue_stopped(txq) && skb->next))

                     return NETDEV_TX_BUSY;

       } while (skb->next);

 

       /* Whole chain sent: restore the original destructor, then fall
        * through to free the now-empty carrier skb. */
       skb->destructor = DEV_GSO_CB(skb)->destructor;

 

out_kfree_skb:

       kfree_skb(skb);

       return 0;

}

 

如果说上面的过程很顺利的话,根本就用不上内核的发送软中断,也即net_tx_action

但是作为一个复杂的系统来说,是不可能这么一帆风顺的;在上面的代码中我们也看到了很多

出错的处理,而对这些错误的处理操作则大多与我们注册的软中断net_tx_action有关。

 

 

你可能感兴趣的:(cache,list,struct,null,processing,action)