Linux Protocol Stack: Link-Layer Data Transmission, the NIC Driver

Source: http://blog.chinaunix.net/u1/51562/showart_405963.html

http://ericxiao.cublog.cn/ analyzes the kernel source in considerable detail; thanks to the author for sharing it so generously.

Reposting of this article is welcome!

When reposting, please credit the source: http://ericxiao.cublog.cn/

------------------------------------------

Episode 5 of Prison Break's third season has finally arrived after a long wait. Scofield uses his astonishing ingenuity to escape danger time and again, but how will he break out of the evil Sona prison? That we do not know yet, but we can analyze the Linux network driver to see how a packet breaks through the "prison" of the physical interface layer and gets its communication done :-)

         I: Preliminaries

         On I/O memory mapping.

         A device is connected to the CPU through a control bus, a data bus, and a status bus. The control bus carries control signals, for example enabling the NIC. The data bus carries data transfers, for example the NIC sending data. The status bus is generally used to read the device's current state, for example reading the NIC's MAC address.

         Traditionally all of this was done by reading and writing device registers, which burns CPU cycles, and having to access a device register for every single value is inefficient. Modern operating systems therefore use I/O memory mapping: the registers are mapped into main memory, so operations on device registers become operations on memory, which greatly improves efficiency.
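
As a minimal sketch of the idea (not e100-specific; REG_STATUS and the helper name are made up for illustration), a PCI driver typically maps BAR 0 with ioremap() and then uses readl()/writel() on the returned address:

/* Minimal sketch of memory-mapped I/O for a PCI device whose registers
 * live in BAR 0. REG_STATUS is a hypothetical register offset. */
#define REG_STATUS 0x08

static void __iomem *regs;      /* kernel virtual address of the registers */

static int map_device_regs(struct pci_dev *pdev)
{
        regs = ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
        if (!regs)
                return -ENOMEM;

        /* a register read is now just a (special) memory read */
        printk(KERN_INFO "status = 0x%08x\n", readl(regs + REG_STATUS));
        return 0;
}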

         On DMA

         DMA is a way of handling device data. The traditional approach: when the device receives data it raises an interrupt, and the CPU, while handling the interrupt, copies the data into memory.

         With DMA, as used in modern operating systems, the device first places the received data into DMA memory itself and only then raises the interrupt to the CPU, which saves a great deal of CPU time.
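
As a rough sketch (illustrative only; the e100-specific version appears later in e100_rx_alloc_skb), a 2.6-era driver hands a receive buffer to the hardware with a streaming DMA mapping like this:

/* Illustrative streaming DMA mapping of a receive buffer (2.6-era PCI API).
 * RX_BUF_LEN is a made-up buffer size. */
#define RX_BUF_LEN 1536

static dma_addr_t map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb)
{
        /* give the device a bus address it can DMA into; the CPU must not
         * touch skb->data until the buffer is synced or unmapped */
        return pci_map_single(pdev, skb->data, RX_BUF_LEN,
                              PCI_DMA_FROMDEVICE);
}

static void unmap_rx_buffer(struct pci_dev *pdev, dma_addr_t addr)
{
        /* called once the device has finished writing the frame */
        pci_unmap_single(pdev, addr, RX_BUF_LEN, PCI_DMA_FROMDEVICE);
}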

         On softirqs and NAPI

         Modern operating systems demand ever faster interrupt handling. To keep latency low, interrupt handling is split into two halves, the top half and the bottom half. The top half pushes the data onto a processing queue and acknowledges the interrupt; the bottom half is then scheduled to finish the remaining work.

         NAPI is a concept newly introduced in 2.6. When an interrupt arrives, it disables further receive interrupts and switches to polling: the kernel keeps asking the device whether more data is pending and processes it, re-enabling interrupts only once the device has been drained.
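
A minimal sketch of the old (2.6.x) NAPI polling model, assuming hypothetical my_hw_* helpers for the device-specific parts:

/* Sketch of a 2.6-era netdev->poll handler; the my_hw_* helpers are
 * placeholders, and my_hw_get_frame() is assumed to have already set
 * skb->protocol (e.g. via eth_type_trans). */
static int my_poll(struct net_device *dev, int *budget)
{
        int limit = min(*budget, dev->quota);
        int done = 0;

        /* drain received frames from the ring; interrupts stay disabled */
        while (done < limit && my_hw_rx_pending(dev)) {
                struct sk_buff *skb = my_hw_get_frame(dev);
                netif_receive_skb(skb);
                done++;
        }

        *budget -= done;
        dev->quota -= done;

        if (!my_hw_rx_pending(dev)) {
                netif_rx_complete(dev);   /* leave polled mode */
                my_hw_enable_irq(dev);    /* interrupts back on */
                return 0;
        }
        return 1;                         /* ask to be polled again */
}

/* The interrupt handler then only has to disable the device IRQ and
 * schedule the poll: my_hw_disable_irq(dev); netif_rx_schedule(dev); */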

         How I/O memory mapping and DMA are implemented in Linux 2.6.21 will come up in the code analysis that follows. The details of softirqs and NAPI will be laid out one by one when we analyze interrupt handling.

http://blog.chinaunix.net/u1/51562/showart_405967.html

 

 

II: Starting from the NIC driver.

         We take the Intel 100M NIC driver as an example to briefly outline how packets are received and sent. The code is in drivers/net/e100.c.

The NIC is a PCI device, and its registration is no different from that of any ordinary PCI device.

static int __init e100_init_module(void)

{

         if(((1 << debug) - 1) & NETIF_MSG_DRV) {

                   printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

                   printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);

         }

 

         // register the PCI driver

        return pci_module_init(&e100_driver);

}

Here e100_driver is the NIC's pci_driver:

static struct pci_driver e100_driver = {

         // the driver's name

         .name =         DRV_NAME,

         // table of device IDs to match

         .id_table =     e100_id_table,

         // probe function

         .probe =        e100_probe,

         // remove function, called when the device is removed

         .remove =       __devexit_p(e100_remove),

#ifdef CONFIG_PM

         .suspend =      e100_suspend,

         .resume =       e100_resume,

#endif

};

When the bus detects a PCI device that matches an entry in e100_id_table, e100_probe is called and initialization of the device begins.
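
For reference, each entry in such an id_table is just a struct pci_device_id. A simplified, hedged illustration (the real e100_id_table lists many 8255x variants through a driver-local macro; 0x1229 here is only one well-known 8255x device ID):

/* Simplified illustration of a PCI id table; not the literal e100 table. */
static struct pci_device_id example_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1229), .driver_data = 0 },
        { 0, }          /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_id_table);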

Inside e100_probe:

static int __devinit e100_probe(struct pci_dev *pdev,

         const struct pci_device_id *ent)

{

         struct net_device *netdev;

         struct nic *nic;

         int err;

 

         // allocate a net_device and fill it in

         // alloc_etherdev is the net_device allocator for Ethernet interfaces; it is a wrapper around alloc_netdev

         if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {

                   if(((1 << debug) - 1) & NETIF_MSG_PROBE)

                             printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");

                   return -ENOMEM;

         }

        

         // give the function pointers in netdev their initial values

         netdev->open = e100_open;

         netdev->stop = e100_close;

         netdev->hard_start_xmit = e100_xmit_frame;

         netdev->get_stats = e100_get_stats;

         netdev->set_multicast_list = e100_set_multicast_list;

         netdev->set_mac_address = e100_set_mac_address;

         netdev->change_mtu = e100_change_mtu;

         netdev->do_ioctl = e100_do_ioctl;

         // only effective when the ethtool tool is supported

         SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);

         netdev->tx_timeout = e100_tx_timeout;

         netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;

         // polling function

         netdev->poll = e100_poll;

         netdev->weight = E100_NAPI_WEIGHT;

#ifdef CONFIG_NET_POLL_CONTROLLER

         netdev->poll_controller = e100_netpoll;

#endif

 

         // get the net_device private data area and fill it in

         // the size of the private area was specified as the argument to alloc_etherdev()

         nic = netdev_priv(netdev);

         nic->netdev = netdev;

         nic->pdev = pdev;

         nic->msg_enable = (1 << debug) - 1;

         pci_set_drvdata(pdev, netdev);

 

         // enable the NIC, preparing for the DMA and I/O memory mapping that follow

         // in practice this is done by writing the PCI command/control registers

         if((err = pci_enable_device(pdev))) {

                   DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");

                   goto err_out_free_dev;

         }

 

         // get the flags associated with this resource

         // if the device has I/O memory, the IORESOURCE_MEM flag is set

         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {

                   DPRINTK(PROBE, ERR, "Cannot find proper PCI device "

                            "base address, aborting./n");

                   err = -ENODEV;

                   goto err_out_disable_pdev;

         }

 

         // request the resources of all six PCI base address registers

         if((err = pci_request_regions(pdev, DRV_NAME))) {

                   DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");

                   goto err_out_disable_pdev;

         }

 

         // probe the device's DMA capability; if the device supports DMA, pci_set_dma_mask returns 0

         pci_set_master(pdev);

         if((err = pci_set_dma_mask(pdev, 0xFFFFFFFFULL))) {

                   DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");

                   goto err_out_free_res;

         }

 

         SET_MODULE_OWNER(netdev);

         SET_NETDEV_DEV(netdev, &pdev->dev);

 

         // map the device's I/O memory; from now on register accesses become plain memory accesses

         nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));

         if(!nic->csr) {

                   DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");

                   err = -ENOMEM;

                   goto err_out_free_res;

         }

 

         if(ent->driver_data)

                   nic->flags |= ich;

         else

                   nic->flags &= ~ich;

 

         spin_lock_init(&nic->cb_lock);

         spin_lock_init(&nic->cmd_lock);

         // set up the timers

         init_timer(&nic->watchdog);

         nic->watchdog.function = e100_watchdog;

         nic->watchdog.data = (unsigned long)nic;

         init_timer(&nic->blink_timer);

         nic->blink_timer.function = e100_blink_led;

         nic->blink_timer.data = (unsigned long)nic;

 

         // set up a consistent (coherent) DMA mapping for nic->mem; it is only used when ethtool is supported

         if((err = e100_alloc(nic))) {

                   DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");

                   goto err_out_iounmap;

         }

         // give the nic members their default values

         e100_get_defaults(nic);

         e100_hw_reset(nic);

         e100_phy_init(nic);

         // read the NIC's EEPROM, which holds the MAC address

         // the EEPROM is read through the I/O-mapped memory, i.e. nic->csr

         if((err = e100_eeprom_load(nic)))

                   goto err_out_free;

         // set netdev->dev_addr

         memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);

         if(!is_valid_ether_addr(netdev->dev_addr)) {

                   DPRINTK(PROBE, ERR, "Invalid MAC address from "

                            "EEPROM, aborting./n");

                   err = -EAGAIN;

                   goto err_out_free;

         }

 

         /* Wol magic packet can be enabled from eeprom */

         if((nic->mac >= mac_82558_D101_A4) &&

            (nic->eeprom[eeprom_id] & eeprom_id_wol))

                   nic->flags |= wol_magic;

 

         pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));

         // register the network device

         if((err = register_netdev(netdev))) {

                   DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");

                   goto err_out_free;

         }

 

         DPRINTK(PROBE, INFO, "addr 0x%lx, irq %d, "

                   "MAC addr %02X:%02X:%02X:%02X:%02X:%02X/n",

                   pci_resource_start(pdev, 0), pdev->irq,

                   netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],

                   netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

 

         return 0;

 

err_out_free:

         e100_free(nic);

err_out_iounmap:

         iounmap(nic->csr);

err_out_free_res:

         pci_release_regions(pdev);

err_out_disable_pdev:

         pci_disable_device(pdev);

err_out_free_dev:

         pci_set_drvdata(pdev, NULL);

         free_netdev(netdev);

         return err;

}

 

http://blog.chinaunix.net/u1/51562/showart_405970.html

It mainly does the following: it fills in the net_device corresponding to the NIC and calls register_netdev to register the network device with the kernel. Network device registration was covered in the previous section, so it is not repeated here.

Let's look at a few key functions in net_device:

// called when the device is opened

netdev->open = e100_open;

// called when the device is stopped

netdev->stop = e100_close;

// called when the device transmits data

netdev->hard_start_xmit = e100_xmit_frame;

At this point the NIC's initialization is complete, and the card can now be operated.

So how is the NIC used? It must first be brought up, i.e. made UP, for example with ifconfig eth0 up.

The kernel then finds the net_device corresponding to the interface name "eth0" and calls net_device->open, which here is e100_open.
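
Conceptually the dispatch looks roughly like the sketch below (simplified; the real path runs through dev_ioctl() and dev_open() in net/core/dev.c, which also update flags and fire notifier events):

/* Simplified sketch of what "ifconfig eth0 up" boils down to. */
struct net_device *dev = dev_get_by_name("eth0");   /* name -> net_device */

if (dev) {
        if (dev->open)
                dev->open(dev);          /* for e100 this is e100_open() */
        dev_put(dev);                    /* drop the reference from the lookup */
}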

e100_open is analyzed below:

static int e100_open(struct net_device *netdev)

{

         struct nic *nic = netdev_priv(netdev);

         int err = 0;

 

         // the interface is coming up: mark the carrier as off until the link comes up

         netif_carrier_off(netdev);

         if((err = e100_up(nic)))

                   DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");

         return err;

}

What we care about is e100_up. Tracing into it:

static int e100_up(struct nic *nic)

{

         int err;

 

         // allocate the receive buffer list

         if((err = e100_rx_alloc_list(nic)))

                   return err;

         // allocate the command (control) block list

         if((err = e100_alloc_cbs(nic)))

                   goto err_rx_clean_list;

         // hardware initialization

         if((err = e100_hw_init(nic)))

                   goto err_clean_cbs;

         // multicast setup

         e100_set_multicast_list(nic->netdev);

         // start the receiver

         e100_start_receiver(nic);

         mod_timer(&nic->watchdog, jiffies);

         // register the interrupt handler

         if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,

                   nic->netdev->name, nic->netdev)))

                   goto err_no_irq;

         // enable interrupts

         e100_enable_irq(nic);

         netif_wake_queue(nic->netdev);

         return 0;

 

err_no_irq:

         del_timer_sync(&nic->watchdog);

err_clean_cbs:

         e100_clean_cbs(nic);

err_rx_clean_list:

         e100_rx_clean_list(nic);

         return err;

}

In this function we can see that it mainly does two things: it builds the ring of DMA receive buffers and registers the interrupt handler.

The ring of DMA receive buffers is built by e100_rx_alloc_list(nic):

static int e100_rx_alloc_list(struct nic *nic)

{

         struct rx *rx;

         // nic->params.rfds.count: total number of receive buffers

         unsigned int i, count = nic->params.rfds.count;

         // rx_to_use: the position currently being filled with data

         // rx_to_clean: where the received data starts; so the valid data lies between rx_to_clean and rx_to_use

         nic->rx_to_use = nic->rx_to_clean = NULL;

         if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))

                   return -ENOMEM;

         memset(nic->rxs, 0, sizeof(struct rx) * count);

         // walk the array and link it into a circular list

         for(rx = nic->rxs, i = 0; i < count; rx++, i++) {

                   rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;

                   rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;

                   if(e100_rx_alloc_skb(nic, rx)) {

                            e100_rx_clean_list(nic);

                            return -ENOMEM;

                   }

         }

         // initialize both positions to nic->rxs

         nic->rx_to_use = nic->rx_to_clean = nic->rxs;

 

         return 0;

}

The function that actually sets up the DMA mapping for each buffer is e100_rx_alloc_skb(), analyzed below:

static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)

{

         unsigned int rx_offset = 2; /* u32 align protocol headers */

         if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + rx_offset)))

                   return -ENOMEM;

         /* Align, init, and map the RFD. */

         rx->skb->dev = nic->netdev;

         // leave rx_offset bytes of headroom at the front of the data area

         skb_reserve(rx->skb, rx_offset);

         // place a blank RFD at the front of skb->data

         memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));

         // create the DMA mapping for skb->data

         rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,

                   RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

 

         /* Link the RFD to end of RFA by linking previous RFD to

          * this one, and clearing EL bit of previous.  */

         // fill in the control information of the previous RFD (in the previous skb)

         if(rx->prev->skb) {

                   struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;

                   put_unaligned(cpu_to_le32(rx->dma_addr),

                            (u32 *)&prev_rfd->link);

                   wmb();

                   prev_rfd->command &= ~cpu_to_le16(cb_el);

                   pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,

                            sizeof(struct rfd), PCI_DMA_TODEVICE);

         }

 

         return 0;

}

What this function mainly does is build the DMA ring. An important data structure, sk_buff, appears here; its layout will be analyzed later. For now it is enough to know that skb->data holds the received data. It is worth noting that the Intel 100M NIC handles received data differently from most NICs you will come across: on reception, the receive unit (RU) writes receive information into the buffer, from which the driver can tell whether the reception was complete and so on. That is the rfd we see in the code. So what sits in skb->data is the rfd followed by the data that came off the wire.
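
So after e100_rx_alloc_skb() each buffer handed to the hardware looks roughly like this (a schematic only, field widths not to scale):

/*   rx->skb->data
 *        |
 *        v
 *        +----------------------+-------------------------------------+
 *        | struct rfd           | frame written by the RU             |
 *        | (status, command,    | (Ethernet header + payload)         |
 *        |  link, size, ...)    |                                     |
 *        +----------------------+-------------------------------------+
 *        |<-sizeof(struct rfd)->|<- rest of the RFD_BUF_LEN buffer  ->|
 */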

At this point the preparation for receiving packets is complete.

 

Let us now analyze the definition of the network device data structure.

Linux is famous for its excellent networking. Where does this efficiency come from? Let's start with how network devices are managed.

     Why does Linux manage network devices as entities of their own? Because the protocol stack touches network devices in many places, from something as small as setting an IP address to something as large as updating the IP routing table; none of it works without efficient device management. Managing network devices separately makes this efficient.

     In Linux, every network device corresponds to one data structure: net_device. Let's start from this structure.

In Linux 2.6.21, net_device is defined as follows:

struct net_device

{

     // device name, e.g. the familiar "eth0"

     char          name[IFNAMSIZ];

     // start and end addresses of shared memory

     unsigned long      mem_end; /* shared mem end  */

     unsigned long      mem_start;    /* shared mem start    */

     // the device's I/O base address

     unsigned long      base_addr;    /* device I/O address  */

     // the IRQ number assigned to the device

     unsigned int       irq;     /* device IRQ number   */

     // which port to use on a multi-port device

     unsigned char      if_port; /* Selectable AUI, TP,..*/

     // the DMA channel allocated to the device

     unsigned char      dma;     /* DMA channel         */

     // device state

     unsigned long      state;

     // the next net_device

     struct net_device  *next;

     // initialization function

     int           (*init)(struct net_device *dev);

     struct net_device  *next_sched;

 

     /* Interface index. Unique device identifier   */

     // the device's index (unique identifier) inside the kernel

     int           ifindex;

     int           iflink;

 

     // function pointer for retrieving interface statistics

     struct net_device_stats* (*get_stats)(struct net_device *dev);

     struct iw_statistics*  (*get_wireless_stats)(struct net_device *dev);

 

     struct iw_handler_def * wireless_handlers;

     struct ethtool_ops *ethtool_ops;

     // time of the last transmit, used to check whether the transmitter is stuck

     unsigned long      trans_start;  /* Time (in jiffies) of last Tx  */

     // time of the last receive

     unsigned long      last_rx; /* Time of last Rx */

     // interface flags

     unsigned short         flags;   /* interface flags (a la BSD)    */

     unsigned short         gflags;

        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */

        unsigned short          unused_alignment_fixer; /* Because we need priv_flags,

                                                         * and we want to be 32-bit aligned.

                                                         */

 

     unsigned      mtu; /* interface MTU value      */

     unsigned short         type;    /* interface hardware type  */

     unsigned short         hard_header_len;   /* hardware hdr length */

     void          *priv;   /* pointer to private data  */

 

     struct net_device  *master; /* Pointer to master device of a group,

                         * which this device is member of.

                         */

 

     /* Interface address info. */

     unsigned char      broadcast[MAX_ADDR_LEN];    /* hw bcast add    */

     unsigned char      dev_addr[MAX_ADDR_LEN]; /* hw address */

     unsigned char      addr_len; /* hardware address length  */

 

     struct dev_mc_list *mc_list; /* Multicast mac addresses  */

     int           mc_count; /* Number of installed mcasts    */

     int           promiscuity;

     int           allmulti;

 

     int           watchdog_timeo;

     struct timer_list  watchdog_timer;

 

     /* Protocol specific pointers */

    

     void               *atalk_ptr;   /* AppleTalk link */

     void          *ip_ptr; /* IPv4 specific data  */ 

     void                    *dn_ptr;        /* DECnet specific data */

     void                    *ip6_ptr;       /* IPv6 specific data */

     void          *ec_ptr; /* Econet specific data */

     void          *ax25_ptr;    /* AX.25 specific data */

 

     struct list_head   poll_list;    /* Link to poll list   */

     int           quota;

     int           weight;

 

     struct Qdisc       *qdisc;

     struct Qdisc       *qdisc_sleeping;

     struct Qdisc       *qdisc_ingress;

     struct list_head   qdisc_list;

     unsigned long      tx_queue_len; /* Max frames per queue allowed */

 

     /* ingress path synchronizer */

     spinlock_t         ingress_lock;

     /* hard_start_xmit synchronizer */

     spinlock_t         xmit_lock;

     /* cpu id of processor entered to hard_start_xmit or -1,

        if nobody entered there.

      */

     int           xmit_lock_owner;

     /* device queue lock */

     spinlock_t         queue_lock;

     /* Number of references to this device */

     atomic_t      refcnt;

     /* delayed register/unregister */

     struct list_head   todo_list;

     /* device name hash chain */

     struct hlist_node  name_hlist;

     /* device index hash chain */

     struct hlist_node  index_hlist;

 

     /* register/unregister state machine */

     enum { NETREG_UNINITIALIZED=0,

            NETREG_REGISTERING,  /* called register_netdevice */

            NETREG_REGISTERED,   /* completed register todo */

            NETREG_UNREGISTERING,     /* called unregister_netdevice */

            NETREG_UNREGISTERED, /* completed unregister todo */

            NETREG_RELEASED,     /* called free_netdev */

     } reg_state;

 

     /* Net device features */

     int           features;

#define NETIF_F_SG     1    /* Scatter/gather IO. */

#define NETIF_F_IP_CSUM     2    /* Can checksum only TCP/UDP over IPv4. */

#define NETIF_F_NO_CSUM     4    /* Does not require checksum. F.e. loopack. */

#define NETIF_F_HW_CSUM     8    /* Can checksum all the packets. */

#define NETIF_F_HIGHDMA     32   /* Can DMA to high memory. */

#define NETIF_F_FRAGLIST    64   /* Scatter/gather IO. */

#define NETIF_F_HW_VLAN_TX  128  /* Transmit VLAN hw acceleration */

#define NETIF_F_HW_VLAN_RX  256  /* Receive VLAN hw acceleration */

#define NETIF_F_HW_VLAN_FILTER   512  /* Receive filtering on VLAN */

#define NETIF_F_VLAN_CHALLENGED  1024 /* Device cannot handle VLAN packets */

#define NETIF_F_TSO         2048 /* Can offload TCP/IP segmentation */

#define NETIF_F_LLTX        4096 /* LockLess TX */

 

     /* Called after device is detached from network. */

     void          (*uninit)(struct net_device *dev);

     /* Called after last user reference disappears. */

     void          (*destructor)(struct net_device *dev);

 

     /* Pointers to interface service routines.     */

     // open function pointer

     int           (*open)(struct net_device *dev);

     // called when the device is stopped

     int           (*stop)(struct net_device *dev);

     // starts transmission of a packet

     int           (*hard_start_xmit) (struct sk_buff *skb,

                                struct net_device *dev);

#define HAVE_NETDEV_POLL

     // polling function

     int           (*poll) (struct net_device *dev, int *quota);

     // builds the hardware header

     int           (*hard_header) (struct sk_buff *skb,

                            struct net_device *dev,

                            unsigned short type,

                            void *daddr,

                            void *saddr,

                            unsigned len);

     // rebuilds the header after ARP resolution

     int           (*rebuild_header)(struct sk_buff *skb);

#define HAVE_MULTICAST     

     // multicast support function

     void          (*set_multicast_list)(struct net_device *dev);

#define HAVE_SET_MAC_ADDR         

     int           (*set_mac_address)(struct net_device *dev,

                               void *addr);

#define HAVE_PRIVATE_IOCTL

     int           (*do_ioctl)(struct net_device *dev,

                           struct ifreq *ifr, int cmd);

#define HAVE_SET_CONFIG

     int           (*set_config)(struct net_device *dev,

                             struct ifmap *map);

#define HAVE_HEADER_CACHE

     int           (*hard_header_cache)(struct neighbour *neigh,

                                 struct hh_cache *hh);

     void          (*header_cache_update)(struct hh_cache *hh,

                                   struct net_device *dev,

                                   unsigned char *  haddr);

#define HAVE_CHANGE_MTU

     int           (*change_mtu)(struct net_device *dev, int new_mtu);

 

#define HAVE_TX_TIMEOUT

     void          (*tx_timeout) (struct net_device *dev);

 

     void          (*vlan_rx_register)(struct net_device *dev,

                                struct vlan_group *grp);

     void          (*vlan_rx_add_vid)(struct net_device *dev,

                               unsigned short vid);

     void          (*vlan_rx_kill_vid)(struct net_device *dev,

                                unsigned short vid);

 

     int           (*hard_header_parse)(struct sk_buff *skb,

                                 unsigned char *haddr);

     int           (*neigh_setup)(struct net_device *dev, struct neigh_parms *);

     int           (*accept_fastpath)(struct net_device *, struct dst_entry*);

#ifdef CONFIG_NETPOLL

     int           netpoll_rx;

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER

     void                    (*poll_controller)(struct net_device *dev);

#endif

 

     /* bridge stuff */

     // the bridge port this device is attached to (analyzed later)

     struct net_bridge_port *br_port;

 

#ifdef CONFIG_NET_DIVERT

     /* this will get initialized at each interface type init routine */

     struct divert_blk  *divert;

#endif /* CONFIG_NET_DIVERT */

 

     /* class/net/name entry */

     struct class_device    class_dev;

     /* how much padding had been added by alloc_netdev() */

     int padded;

};

Whew, that is a lot of members; the structure is huge. No matter, we will explain each member when we actually need it.

Notice that this huge structure has a member called struct net_device *next. Looks familiar, right? It is exactly what links the network devices into a list.

Every network device, when it is brought up for registration, calls register_netdev() (drivers/net/net_init.c).

Tracing into this function:

int register_netdev(struct net_device *dev)

{

     int err;

 

     rtnl_lock();

 

     /*

      *   If the name is a format string the caller wants us to

      *   do a name allocation

      */

      

     if (strchr(dev->name, '%'))

     {

         err = dev_alloc_name(dev, dev->name);

         if (err < 0)

              goto out;

     }

    

     /*

      *   Back compatibility hook. Kill this one in 2.5

      */

    

     if (dev->name[0]==0 || dev->name[0]==' ')

     {

         err = dev_alloc_name(dev, "eth%d");

         if (err < 0)

              goto out;

     }

 

     err = register_netdevice(dev);

 

out:

     rtnl_unlock();

     return err;

}

 

Tracing on into register_netdevice(struct net_device *dev) (net/core/dev.c):

int register_netdevice(struct net_device *dev)

{

     struct hlist_head *head;

     struct hlist_node *p;

     int ret;

 

     BUG_ON(dev_boot_phase);

     ASSERT_RTNL();

 

     /* When net_device's are persistent, this will be fatal. */

     BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);

 

     spin_lock_init(&dev->queue_lock);

     spin_lock_init(&dev->xmit_lock);

     dev->xmit_lock_owner = -1;

#ifdef CONFIG_NET_CLS_ACT

     spin_lock_init(&dev->ingress_lock);

#endif

 

     ret = alloc_divert_blk(dev);

     if (ret)

         goto out;

 

     dev->iflink = -1;

 

     /* Init, if this function is available */

     // if dev->init has been set, call it

     if (dev->init) {

         ret = dev->init(dev);

         if (ret) {

              if (ret > 0)

                   ret = -EIO;

              goto out_err;

         }

     }

 

     // check whether the name is valid

     if (!dev_valid_name(dev->name)) {

         ret = -EINVAL;

         goto out_err;

     }

     // allocate an index for this device

     dev->ifindex = dev_new_index();

     if (dev->iflink == -1)

         dev->iflink = dev->ifindex;

 

     /* Check for existence of name */

 

     // all network devices are kept in dev_name_head, a hash table (array of lists) keyed by name

     // find the list bucket corresponding to this name

     // if the kernel already has a network device with this name, fail and exit

     head = dev_name_hash(dev->name);

     hlist_for_each(p, head) {

         struct net_device *d

              = hlist_entry(p, struct net_device, name_hlist);

         if (!strncmp(d->name, dev->name, IFNAMSIZ)) {

              ret = -EEXIST;

              goto out_err;

         }

     }

 

     /* Fix illegal SG+CSUM combinations. */

     if ((dev->features & NETIF_F_SG) &&

         !(dev->features & (NETIF_F_IP_CSUM |

                     NETIF_F_NO_CSUM |

                     NETIF_F_HW_CSUM))) {

         printk("%s: Dropping NETIF_F_SG since no checksum feature./n",

                dev->name);

         dev->features &= ~NETIF_F_SG;

     }

 

     /*

      *   nil rebuild_header routine,

      *   that should be never called and used as just bug trap.

      */

 

     // give rebuild_header a default value

     if (!dev->rebuild_header)

         dev->rebuild_header = default_rebuild_header;

 

     /*

      *   Default initial state at registry is that the

      *   device is present.

      */

 

     set_bit(__LINK_STATE_PRESENT, &dev->state);

 

     dev->next = NULL;

     dev_init_scheduler(dev);

     write_lock_bh(&dev_base_lock);

     // at initialization time there is: struct net_device **dev_tail = &dev_base;

     // so this code simply appends dev to the tail of the list headed by dev_base

     *dev_tail = dev;

     dev_tail = &dev->next;

     // add the node to dev_name_head, the hash table keyed by name

     hlist_add_head(&dev->name_hlist, head);

     // add the node to dev_index_head, the hash table keyed by ifindex

     hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));

     dev_hold(dev);

     dev->reg_state = NETREG_REGISTERING;

     write_unlock_bh(&dev_base_lock);

 

     /* Notify protocols, that a new device appeared. */

     // raise an event on the notifier chain

     notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);

 

     /* Finish registration after unlock */

     net_set_todo(dev);

     ret = 0;

 

out:

     return ret;

out_err:

     free_divert_blk(dev);

     goto out;

}

 

From this we can see that a newly added device is inserted in three places: dev_name_head, hashed by name; dev_index_head, hashed by index; and the dev_base list. This is the basis for fast lookup of network devices. In fact the kernel frequently needs to find a dev from an index, or find a dev from a name; we will analyze those paths when we meet them.
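
For example, the standard helpers built on top of these tables look like this (2.6-era signatures; both take a reference on the device, which the caller must drop with dev_put()):

/* Looking a device up by name or by ifindex (2.6-era signatures). */
struct net_device *dev;

dev = dev_get_by_name("eth0");      /* searches dev_name_head */
if (dev)
        dev_put(dev);

dev = dev_get_by_index(1);          /* searches dev_index_head */
if (dev)
        dev_put(dev);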

 

By now we can follow the thread and find every network device in the kernel.

One more important question remains: when a device's configuration changes, how are the subsystems that depend on it told about it? For example, if a NIC's IP address changes, how does the routing table get updated?

Read on:

Note the function called in the registration code above: notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev).

Its job is to send a NETDEV_REGISTER message on the notifier chain netdev_chain; every subsystem attached to that chain receives the message. In this way configuration changes propagate through the whole system quickly.

Let's use the routing subsystem as an example of this process:

When the IPv4 subsystem is loaded it calls ip_init(), which calls fib_init(), which in turn calls ip_fib_init().

Tracing into this function:

void __init ip_fib_init(void)

{

#ifndef CONFIG_IP_MULTIPLE_TABLES

     ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL);

     ip_fib_main_table  = fib_hash_init(RT_TABLE_MAIN);

#else

     fib_rules_init();

#endif

 

     register_netdevice_notifier(&fib_netdev_notifier);

     register_inetaddr_notifier(&fib_inetaddr_notifier);

}

What does register_netdevice_notifier do? Tracing further:

int register_netdevice_notifier(struct notifier_block *nb)

{

     struct net_device *dev;

     int err;

 

     rtnl_lock();

     // register on the notifier chain

     err = notifier_chain_register(&netdev_chain, nb);

     if (!err) {

         for (dev = dev_base; dev; dev = dev->next) {

              nb->notifier_call(nb, NETDEV_REGISTER, dev);

 

              if (dev->flags & IFF_UP)

                   nb->notifier_call(nb, NETDEV_UP, dev);

         }

     }

     rtnl_unlock();

     return err;

}

So it registers a notifier block on netdev_chain; when an event occurs on this chain, the handler recorded in fib_netdev_notifier is called. Here is what fib_netdev_notifier looks like:

 

struct notifier_block fib_netdev_notifier = {

     .notifier_call =fib_netdev_event,

};

 

OK, it is getting more and more concrete: when there is an event on netdev_chain, fib_netdev_event is called to handle it. Tracing on:

static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)

{

     struct net_device *dev = ptr;

     struct in_device *in_dev = __in_dev_get(dev);

     // the device is being unregistered

     if (event == NETDEV_UNREGISTER) {

         fib_disable_ip(dev, 2);

         return NOTIFY_DONE;

     }

 

     if (!in_dev)

         return NOTIFY_DONE;

 

     switch (event) {

     // the device came up

     case NETDEV_UP:

         for_ifa(in_dev) {

              fib_add_ifaddr(ifa);

         } endfor_ifa(in_dev);

#ifdef CONFIG_IP_ROUTE_MULTIPATH

         fib_sync_up(dev);

#endif

         rt_cache_flush(-1);

         break;

     // the device went down

     case NETDEV_DOWN:

         fib_disable_ip(dev, 0);

         break;

     // device parameters changed

     case NETDEV_CHANGEMTU:

     case NETDEV_CHANGE:

         rt_cache_flush(0);

         break;

     }

     return NOTIFY_DONE;

}

 

The routing code itself will be covered in later notes. At this point the overall architecture of network devices should be quite clear!
