DPDK NIC Device Loading Flow

1. NIC device scan on the bus

The scan code lives in dpdk/lib/librte_eal/common/eal_common_bus.c.
During EAL initialization, rte_eal_init() calls rte_bus_scan(), which invokes the scan callback of every registered bus so that each bus can discover the devices attached to it.
The subsections below walk through bus registration, NIC driver registration, and PCI device registration in detail.
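For reference, rte_bus_scan() itself is a simple loop over the registered buses; paraphrased from eal_common_bus.c:

int
rte_bus_scan(void)
{
	int ret;
	struct rte_bus *bus = NULL;

	/* walk every bus registered on rte_bus_list and let it scan */
	TAILQ_FOREACH(bus, &rte_bus_list, next) {
		ret = bus->scan();
		if (ret)
			RTE_LOG(ERR, EAL, "Scan for (%s) bus failed.\n",
				bus->name);
	}
	return 0;
}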

1.1 Bus registration

Each bus registers itself through the RTE_REGISTER_BUS macro defined in dpdk/lib/librte_eal/include/rte_bus.h; the registration runs as a constructor at program startup, before main(). The current users of the macro, found by searching the tree, are listed after its definition below.

/**
 * Helper for Bus registration.
 * The constructor has higher priority than PMD constructors.
 */
#define RTE_REGISTER_BUS(nm, bus) \
RTE_INIT_PRIO(businitfn_ ##nm, BUS) \
{\
	(bus).name = RTE_STR(nm);\
	rte_bus_register(&bus); \
}
RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus); /* drivers/bus/dpaa/dpaa_bus.c */
RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);   /* drivers/bus/fslmc/fslmc_bus.c */
RTE_REGISTER_BUS(IFPGA_BUS_NAME, rte_ifpga_bus);       /* drivers/bus/ifpga/ifpga_bus.c */
RTE_REGISTER_BUS(pci, rte_pci_bus.bus);                /* drivers/bus/pci/pci_common.c */
RTE_REGISTER_BUS(vdev, rte_vdev_bus);                  /* drivers/bus/vdev/vdev.c */
RTE_REGISTER_BUS(vmbus, rte_vmbus_bus.bus);            /* drivers/bus/vmbus/vmbus_common.c */
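rte_bus_register() itself (eal_common_bus.c) just sanity-checks the mandatory callbacks and appends the bus to rte_bus_list; paraphrased:

void
rte_bus_register(struct rte_bus *bus)
{
	RTE_VERIFY(bus);
	RTE_VERIFY(bus->name && strlen(bus->name));
	/* a bus must at least implement scan, probe and find_device */
	RTE_VERIFY(bus->scan);
	RTE_VERIFY(bus->probe);
	RTE_VERIFY(bus->find_device);
	/* buses supporting plug also require unplug */
	RTE_VERIFY(!bus->plug || bus->unplug);

	TAILQ_INSERT_TAIL(&rte_bus_list, bus, next);
	RTE_LOG(DEBUG, EAL, "Registered [%s] bus.\n", bus->name);
}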

1.2 Registering the global rte_pci_bus

We focus on NIC (PCI) devices here. Every bus is linked onto the global rte_bus_list; rte_bus_scan() traverses that list and calls each bus's scan callback. Below is the registration code for the PCI bus, followed by a gdb walk along the list to locate it.

struct rte_pci_bus rte_pci_bus = {
	.bus = {
		.scan = rte_pci_scan,
		.probe = pci_probe,
		.find_device = pci_find_device,
		.plug = pci_plug,
		.unplug = pci_unplug,
		.parse = pci_parse,
		.dma_map = pci_dma_map,
		.dma_unmap = pci_dma_unmap,
		.get_iommu_class = rte_pci_get_iommu_class,
		.dev_iterate = rte_pci_dev_iterate,
		.hot_unplug_handler = pci_hot_unplug_handler,
		.sigbus_handler = pci_sigbus_handler,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
};
RTE_REGISTER_BUS(pci, rte_pci_bus.bus);
/* walk the list in gdb and print the PCI bus entry */
(gdb) p rte_bus_list.tqh_first[0].next.tqe_next[0].next.tqe_next[0].next.tqe_next[0]
$7 = {
  next = {
    tqe_next = 0x399cd40 <rte_vdev_bus>,
    tqe_prev = 0x399cb40 <rte_ifpga_bus>
  },
  name = 0x33f0f9f "pci", /* name set at registration time */
  scan = 0x485183 <rte_pci_scan>,
  probe = 0x489590 <rte_pci_probe>, /* pci_probe in newer code */
  find_device = 0x489a35 <pci_find_device>,
  plug = 0x489cf8 <pci_plug>,
  unplug = 0x489d26 <pci_unplug>,
  parse = 0x489835 <pci_parse>,
  dma_map = 0x489d90 <pci_dma_map>,
  dma_unmap = 0x489e76 <pci_dma_unmap>,
  conf = {
    scan_mode = RTE_BUS_SCAN_BLACKLIST
  },
  get_iommu_class = 0x489fc8 <rte_pci_get_iommu_class>,
  dev_iterate = 0x488dc0 <rte_pci_dev_iterate>,
  hot_unplug_handler = 0x489bd6 <pci_hot_unplug_handler>,
  sigbus_handler = 0x489c70 <pci_sigbus_handler>
}
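Rather than chasing tqe_next pointers by hand, the same lookup can be done programmatically with rte_bus_find_by_name() (declared in rte_bus.h). A minimal sketch; the helper name find_pci_bus is hypothetical:

#include <rte_bus.h>
#include <rte_bus_pci.h>
#include <rte_common.h>

/* struct rte_pci_bus embeds struct rte_bus as its first member,
 * so container_of() recovers the enclosing structure. */
static struct rte_pci_bus *
find_pci_bus(void)
{
	struct rte_bus *bus = rte_bus_find_by_name("pci");

	if (bus == NULL)
		return NULL;
	return container_of(bus, struct rte_pci_bus, bus);
}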

1.3 NIC driver registration

The NIC driver registration helpers live in drivers/bus/pci/rte_bus_pci.h:

/* Adds the PMD to the global driver list rte_pci_bus.driver_list; probe walks this list later */
void rte_pci_register(struct rte_pci_driver *driver);

/** Helper for PCI device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
RTE_INIT(pciinitfn_ ##nm) \
{\
	(pci_drv).driver.name = RTE_STR(nm);\
	rte_pci_register(&pci_drv); \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
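The registration function itself is tiny; paraphrased from drivers/bus/pci/pci_common.c:

void
rte_pci_register(struct rte_pci_driver *driver)
{
	/* append the PMD to the PCI bus driver list and back-link the bus */
	TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
	driver->bus = &rte_pci_bus;
}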

Below is the i40e registration. The key piece is the global table pci_id_i40e_map, which stores the NIC's ID information; during probe, the vendor_id and device_id fields of struct rte_pci_id are matched against the device to select this driver.

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map, /* global table; vendor_id/device_id in struct rte_pci_id match a device to this driver */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

For example, my X710 NIC reports the following vendor and device IDs, which match an entry in pci_id_i40e_map:

[root@domain 0000:00:07.0]# cat vendor
0x8086
[root@domain 0000:00:07.0]# cat device
0x1572

The matching vendor and device IDs in pci_id_i40e_map:

/*pci_id_i40e_map{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }*/
/* Vendor ID */
#define I40E_INTEL_VENDOR_ID		0x8086
/* Device IDs */
#define I40E_DEV_ID_SFP_XL710		0x1572
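The match itself is just a walk over the driver's id_table. A simplified sketch modeled on rte_pci_match() (pci_common.c); note the real function also honors the RTE_PCI_ANY_ID and RTE_CLASS_ANY_ID wildcards and checks class_id and the subsystem IDs, and the helper name pci_id_match is hypothetical:

#include <rte_bus_pci.h>

static int
pci_id_match(const struct rte_pci_driver *drv,
	     const struct rte_pci_device *dev)
{
	const struct rte_pci_id *id;

	/* id_table is terminated by a zeroed entry */
	for (id = drv->id_table; id->vendor_id != 0; id++) {
		if (id->vendor_id == dev->id.vendor_id &&
		    id->device_id == dev->id.device_id)
			return 1; /* e.g. 0x8086/0x1572 selects rte_i40e_pmd */
	}
	return 0;
}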

Walking rte_pci_bus.driver_list in gdb until we reach rte_i40e_pmd, then printing it:

(gdb)  p rte_pci_bus.driver_list.tqh_first.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next
$1 = (struct rte_pci_driver *) 0x39b3e40 <rte_i40e_pmd>
(gdb) p *$1
$3 = {
  next = {
    tqe_next = 0x39b3ec0 <rte_i40evf_pmd>,
    tqe_prev = 0x39b3500 <rte_hns3vf_pmd>
  },
  driver = {
    next = {
      tqe_next = 0x0,
      tqe_prev = 0x0
    },
    name = 0x34abd6e "net_i40e",
    alias = 0x0
  },
  bus = 0x399dc40 <rte_pci_bus>,
  probe = 0x1561c12 <eth_i40e_pci_probe>,
  remove = 0x1561e4e <eth_i40e_pci_remove>,
  dma_map = 0x0,
  dma_unmap = 0x0,
  id_table = 0x34aae20 <pci_id_i40e_map>,
  drv_flags = 9
}

1.4 NIC device scan

The relevant call chain:

rte_bus_scan()->rte_pci_scan()->pci_scan_one() /*drivers/bus/pci/linux/pci.c*/

rte_pci_scan() walks the directory /sys/bus/pci/devices and filters the devices against the configured blacklist or whitelist. For each device that passes, pci_scan_one() instantiates a struct rte_pci_device and fills in its fields: rte_pci_addr (the PCI address), struct rte_pci_id (the identity information, vendor_id and device_id, used to decide which driver applies), max_vfs (the maximum number of SR-IOV VFs), and numa_node.
Particularly important is the device's kernel driver type, enum rte_kernel_driver kdrv; a condensed sketch of pci_scan_one() follows.
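This is a heavily condensed, illustrative sketch; the real pci_scan_one() (drivers/bus/pci/linux/pci.c) parses each sysfs file, handles errors, and does a sorted insert into rte_pci_bus.device_list. The helper name scan_one_sketch is hypothetical; the literal values are the ones read for my X710:

#include <stdlib.h>
#include <rte_bus_pci.h>

static struct rte_pci_device *
scan_one_sketch(const struct rte_pci_addr *addr)
{
	struct rte_pci_device *dev = calloc(1, sizeof(*dev));

	if (dev == NULL)
		return NULL;
	dev->addr = *addr;          /* parsed from the sysfs directory name */
	dev->id.vendor_id = 0x8086; /* from .../vendor                      */
	dev->id.device_id = 0x1572; /* from .../device                      */
	dev->max_vfs = 0;           /* from .../max_vfs or .../sriov_numvfs */
	dev->device.numa_node = 0;  /* from .../numa_node                   */
	/* .../resource fills dev->mem_resource[0..5].phys_addr and .len */
	/* readlink(.../driver) selects dev->kdrv: */
	dev->kdrv = RTE_KDRV_IGB_UIO;
	return dev;                 /* then linked into device_list */
}

The kdrv value comes from the driver symlink: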

[root@domain 0000:00:07.0]# ls driver -l /* the NIC is currently bound to igb_uio */
lrwxrwxrwx 1 root root 0 Jul  5 12:00 driver -> ../../../bus/pci/drivers/igb_uio

The NIC's physical resources are initialized by pci_parse_sysfs_resource(), which fills mem_resource[6] from the sysfs resource file:

[root@domain 0000:00:07.0]# ls resource*
resource      resource0     resource0_wc  resource3     resource3_wc
[root@domain 0000:00:07.0]# cat resource
0x00000000fc800000 0x00000000fcffffff 0x000000000014220c
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x00000000fd008000 0x00000000fd00ffff 0x000000000014220c
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x00000000fea80000 0x00000000feafffff 0x000000000004e200
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000

The instantiated dev is appended to the rte_pci_bus.device_list; below we locate our NIC through this global list.
Note that pci_scan_one() only fills in basic information about the NIC; it does not map or allocate any resources. That is done later by rte_bus_probe().
1. List the PCI devices in the current environment

[root@domain 0000:00:07.0]# cd /sys/bus/pci/devices/
[root@domain devices]# ls /* PCI addresses present in this directory */
0000:00:00.0  0000:00:01.0  0000:00:01.1  0000:00:01.2  0000:00:01.3  0000:00:02.0  0000:00:03.0  0000:00:04.0  0000:00:05.0  0000:00:06.0  0000:00:07.0  0000:00:08.0

2. lspci | grep Eth shows which of them are Ethernet NICs

[root@domain devices]# lspci | grep Eth /* only four are Ethernet devices */
00:03.0 Ethernet controller: Red Hat, Inc. Virtio network device
00:04.0 Ethernet controller: Red Hat, Inc. Virtio network device
00:06.0 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 02)
00:07.0 Ethernet controller: Intel Corporation Ethernet Controller X710 for 10GbE SFP+ (rev 02)

3. Taking PCI device 0000:00:07.0 as an example, list the files in its directory

[root@domain 0000:00:07.0]# ls /* these files are read to fill the corresponding fields of the instantiated rte_pci_device */
broken_parity_status  consistent_dma_mask_bits  dma_mask_bits    enable         local_cpulist  modalias  numa_node  rescan    resource0     resource3_wc  subsystem_device  uio
class                 d3cold_allowed            driver           firmware_node  local_cpus     msi_bus   power      reset     resource0_wc  rom           subsystem_vendor  vendor
config                device                    driver_override  irq            max_vfs        msi_irqs  remove     resource  resource3     subsystem     uevent

4. Find NIC 0000:00:07.0 via the global rte_pci_bus.device_list and print the filled-in structure with gdb.

(gdb) p rte_pci_bus.device_list.tqh_first.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next.next.tqe_next[0]
$6 = {
  next = {  /* linkage into the global rte_pci_bus.device_list */
    tqe_next = 0x4d08b70,
    tqe_prev = 0x4cee8b0
  },
  device = {
    next = {
      tqe_next = 0x0,
      tqe_prev = 0x0
    },
    name = 0x4d02330 "0000:00:07.0",  /* points at this structure's name field */
    driver = 0x39b3e50 <rte_i40e_pmd+16>, /* the matched PMD is i40e */
    bus = 0x399dc40 <rte_pci_bus>,      /* the global PCI bus structure */
    numa_node = 0,
    devargs = 0x0
  },
  addr = {/* 0000:00:07.0 split into its components */
    domain = 0,
    bus = 0 '\000',
    devid = 7 '\a',
    function = 0 '\000'
  },
  id = {
    class_id = 131072, /* value of /sys/bus/pci/devices/0000:00:07.0/class */
    vendor_id = 32902, /* value of /sys/bus/pci/devices/0000:00:07.0/vendor */
    device_id = 5490, /* value of /sys/bus/pci/devices/0000:00:07.0/device */
    subsystem_vendor_id = 32902, /* value of /sys/bus/pci/devices/0000:00:07.0/subsystem_vendor */
    subsystem_device_id = 0 /* value of /sys/bus/pci/devices/0000:00:07.0/subsystem_device */
  },
  mem_resource = {{ /* contents of /sys/bus/pci/devices/0000:00:07.0/resource */
      phys_addr = 4236247040, /* phys_addr and len were filled from the resource file at scan time */
      len = 8388608,  /* size of /sys/bus/pci/devices/0000:00:07.0/resource0 */
      addr = 0x7f7c0080d000 /* the virtual address is filled in later, during probe */
    }, {
      phys_addr = 0,
      len = 0,
      addr = 0x0
    }, {
      phys_addr = 0,
      len = 0,
      addr = 0x0
    }, {
      phys_addr = 4244668416,
      len = 32768,  /* size of /sys/bus/pci/devices/0000:00:07.0/resource3 */
      addr = 0x7f7c0100d000
    }, {
      phys_addr = 0,
      len = 0,
      addr = 0x0
    }, {
      phys_addr = 0,
      len = 0,
      addr = 0x0
    }},
  intr_handle = {
    {
      vfio_dev_fd = 25,
      uio_cfg_fd = 25 /* fd of the UIO config file /sys/class/uio/uio1/device/config */
    },
    fd = 24, /* fd of /dev/uio1 */
    type = RTE_INTR_HANDLE_UIO, /* the device is driven by igb_uio */
    max_intr = 0,
    nb_efd = 0,
    efd_counter_size = 0 '\000',
    efds = {0 <repeats 512 times>},
    elist = {{
        status = 0,
        fd = 0,
        epfd = 0,
        epdata = {
          event = 0,
          data = 0x0,
          cb_fun = 0x0,
          cb_arg = 0x0
        }
      } <repeats 512 times>},
    intr_vec = 0x0
  },
  driver = 0x39b3e40 <rte_i40e_pmd>,
  max_vfs = 0, /* number of SR-IOV VFs */
  kdrv = RTE_KDRV_IGB_UIO, /* the kernel driver in use is igb_uio */
  name = "0000:00:07.0\000\000\000\000\000", /* PCI address of this NIC */
  vfio_req_intr_handle = {
    {
      vfio_dev_fd = 0,
      uio_cfg_fd = 0
    },
    fd = 0,
    type = RTE_INTR_HANDLE_UNKNOWN,
    max_intr = 0,
    nb_efd = 0,
    efd_counter_size = 0 '\000',
    efds = {0 <repeats 512 times>},
    elist = {{
        status = 0,
        fd = 0,
        epfd = 0,
        epdata = {
          event = 0,
          data = 0x0,
          cb_fun = 0x0,
          cb_arg = 0x0
        }
      } <repeats 512 times>},
    intr_vec = 0x0
  }
}
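As an aside, the addr fields above come from splitting the "DDDD:BB:DD.F" directory name. A minimal sketch of the idea; the real parser is rte_pci_addr_parse() in lib/librte_pci/rte_pci.c (which also accepts the short "BB:DD.F" form and validates ranges), and the helper name parse_pci_addr is hypothetical:

#include <stdio.h>
#include <rte_pci.h>

static int
parse_pci_addr(const char *s, struct rte_pci_addr *a)
{
	unsigned int dom, bus, dev, fn;

	if (sscanf(s, "%x:%x:%x.%x", &dom, &bus, &dev, &fn) != 4)
		return -1;
	a->domain = dom;
	a->bus = bus;
	a->devid = dev;
	a->function = fn;
	return 0; /* "0000:00:07.0" -> domain 0, bus 0, devid 7, function 0 */
}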

2. Mapping NIC hardware resources into user space

DPDK uses the kernel's UIO mechanism to map hardware resources (MMIO, I/O ports, interrupts) into user space; see the references at the end of this article for background.

2.1 Mapping into user space via the igb_uio driver

The igb_uio driver lives in kernel/linux/igb_uio/igb_uio.c. During DPDK EAL initialization the mapping is driven by rte_bus_probe() (lib/librte_eal/common/eal_common_bus.c); let's take a quick look.
During the user-space PCI driver probe, if the driver set the RTE_PCI_DRV_NEED_MAPPING flag, address mapping is performed. The call chain is:

rte_bus_probe()->
   /* iterate rte_pci_bus.device_list, calling pci_probe_all_drivers for each device */
   pci_probe(drivers/bus/pci/pci_common.c)->
   /* walk the global driver list rte_pci_bus.driver_list and match each PMD against
    * the device's struct rte_pci_id; the drivers were registered at startup */
   pci_probe_all_drivers()->rte_pci_probe_one_driver()->
       rte_pci_match() /* driver matching */
       rte_pci_map_device() /* map this device's mem_resource[6] via igb_uio */
           pci_uio_map_resource()->pci_uio_alloc_resource()
                /* read /sys/bus/pci/devices/0000:00:07.0/uio and find uio1, giving
                 * uio num = 1; enter uio1, read the dev file for major:minor and
                 * create the character device, as detailed below. */
                pci_get_uio_dev()
                /* scan already read each resource's physical address and size;
                 * here the resources are mmap()ed to virtual addresses */
                pci_uio_map_resource_by_index()
       dr->probe = eth_i40e_pci_probe() /* PMD probe */
  • pci_get_uio_dev() reads the uio number and the major/minor device numbers, then creates the /dev/uio1 character device:
[root@domain uio]# pwd
/sys/bus/pci/devices/0000:00:07.0/uio
[root@domain uio]# ls
uio1         /* 1. read the uio directory to get uio_num = 1 */
[root@domain uio]# cd uio1/
[root@domain uio1]# ls
dev  device  event  maps  name  power  subsystem  uevent  version
[root@domain uio1]# cat dev
242:1  /* 2. create the char device: "mknod /dev/uioX c major minor";
	read the major and minor numbers, then create the character device:
	snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
	dev = makedev(major, minor);
	ret = mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev); */
[root@domain uio1]# ls /sys/dev/char/ -l | grep uio
lrwxrwxrwx 1 root root 0 Jul 12 13:45 242:0 -> ../../devices/pci0000:00/0000:00:06.0/uio/uio0
lrwxrwxrwx 1 root root 0 Jul 12 13:45 242:1 -> ../../devices/pci0000:00/0000:00:07.0/uio/uio1
lrwxrwxrwx 1 root root 0 Jul 12 13:45 242:2 -> ../../devices/pci0000:00/0000:00:04.0/uio/uio2
[root@domain uio1]# ls /dev/uio1 /* created by the mknod above */
/dev/uio1
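A self-contained version of that mknod step, assuming major 242 and minor 1 as read from sysfs above; the helper name make_uio_node is hypothetical:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

/* Standalone sketch of the char-device creation quoted above;
 * uio_num, major and minor come from sysfs (here 1, 242, 1). */
static int
make_uio_node(unsigned int uio_num, unsigned int major, unsigned int minor)
{
	char filename[64];
	dev_t dev;

	snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
	dev = makedev(major, minor);
	return mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev);
}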

2.2 NIC resource list management

The global rte_uio_tailq holds the per-device resources that were mapped through the igb_uio driver; its entries are filled in by pci_uio_map_resource_by_index().
The function picks an address close to the end of the hugepage area and mmap()s each BAR there, via pci_map_resource():

	/* try mapping somewhere close to the end of hugepages */
	if (pci_map_addr == NULL)
		pci_map_addr = pci_find_max_end_va();

	mapaddr = pci_map_resource(pci_map_addr, fd, 0,
			(size_t)dev->mem_resource[res_idx].len, 0);
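pci_map_resource() itself is a thin wrapper around mmap(); paraphrased from drivers/bus/pci/pci_common.c, with logging stripped:

#include <sys/mman.h>

void *
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
		 int additional_flags)
{
	void *mapaddr;

	/* map the PCI memory resource of the device */
	mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
			MAP_SHARED | additional_flags, fd, offset);
	/* the real function logs success/failure; callers check MAP_FAILED */
	return mapaddr;
}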
(gdb) p rte_uio_tailq.head[0] /* head of the global resource tailq */
$7 = {
  tailq_head = {
    tqh_first = 0x7f577fd9e200,
    tqh_last = 0x7f577fbb6c00
  },
  name = "UIO_RESOURCE_LIST", '\000' <repeats 14 times>
}
/* walk to the mapped_pci_resource entry for PCI 0000:00:07.0 and print it */
(gdb) p (struct mapped_pci_resource)rte_uio_tailq.head->tailq_head.tqh_first.next.tqe_next.next.tqe_next.next.tqe_next
$18 = {
  next = {
    tqe_next = 0x0,
    tqe_prev = 0x7f577fd83dc0
  },
  pci_addr = {
    domain = 0,
    bus = 0 '\000',
    devid = 7 '\a',
    function = 0 '\000'
  },
  path = "/dev/uio1",
  nb_maps = 2,
  maps = {{
      addr = 0x7f7c0080d000,
      path = 0x7f577fbb5b80 "/sys/bus/pci/devices/0000:00:07.0/resource0",
      offset = 0,
      size = 8388608,
      phaddr = 4236247040
    }, {
      addr = 0x7f7c0100d000,
      path = 0x7f577fbb4b00 "/sys/bus/pci/devices/0000:00:07.0/resource3",
      offset = 0,
      size = 32768,
      phaddr = 4244668416
    }, },
  msix_table = {
    bar_index = 0,
    offset = 0,
    size = 0
  }
}
(Figure 1: DPDK NIC device loading flow)

2.3 eth_i40e_pci_probe (to be analyzed in more detail)

The heart of eth_i40e_pci_probe() is this call:

retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);
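What rte_eth_dev_create() does, in a condensed sketch of the primary-process path (lib/librte_ethdev/rte_ethdev.c); error handling and the secondary-process branch are omitted:

int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init bus_specific_init, void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;

	/* reserve a port in rte_eth_devices[] and a data[] slot in the
	 * shared memory area rte_eth_dev_shared_data */
	ethdev = rte_eth_dev_allocate(name);

	/* dev_private is allocated on the device's NUMA socket */
	ethdev->data->dev_private = rte_zmalloc_socket(name,
		priv_data_size, RTE_CACHE_LINE_SIZE, device->numa_node);

	ethdev->device = device;
	bus_specific_init(ethdev, bus_init_params); /* eth_dev_pci_specific_init */
	ethdev_init(ethdev, init_params);           /* eth_i40e_dev_init */
	rte_eth_dev_probing_finish(ethdev);
	return 0;
}

After probe completes, the resulting port looks like this in gdb: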
(gdb) p rte_eth_devices[2]
$30 = {
  rx_pkt_burst = 0x170be9b <i40e_recv_scattered_pkts_vec>, /* receive-burst function */
  tx_pkt_burst = 0x16a47cb <i40e_xmit_pkts>, /* transmit-burst function */
  tx_pkt_prepare = 0x16c2900 <i40e_prep_pkts>, /* per-packet preparation before transmit */
  data = 0x7f577fda2d80, /* points into the shared data: rte_eth_dev_shared_data->data[2] */
  process_private = 0x0,
  dev_ops = 0x398fa20 <i40e_eth_dev_ops>, /* driver ops table */
  device = 0x4cfba20,  
  intr_handle = 0x4cfbb00,
  link_intr_cbs = {
    tqh_first = 0x0,
    tqh_last = 0x3f50700 <rte_eth_devices+33216>
  },
  post_rx_burst_cbs = {0x0 <repeats 1024 times>},
  pre_tx_burst_cbs = {0x0 <repeats 1024 times>},
  state = RTE_ETH_DEV_ATTACHED,
  security_ctx = 0x0,
  reserved_64s = {0, 0, 0, 0},
  reserved_ptrs = {0x0, 0x0, 0x0, 0x0}
}
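rx_pkt_burst and tx_pkt_burst are the functions that rte_eth_rx_burst() and rte_eth_tx_burst() dispatch to. A minimal polling sketch, using port 2 and queue 0 to match the dump above; the helper name poll_port2_once is hypothetical:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
poll_port2_once(void)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	/* dispatches to rte_eth_devices[2].rx_pkt_burst, here
	 * i40e_recv_scattered_pkts_vec */
	nb = rte_eth_rx_burst(2 /* port */, 0 /* queue */, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]); /* placeholder for real processing */
}

The shared per-port data referenced above: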
(gdb) p rte_eth_dev_shared_data->data[2]
$22 = {
  name = "0000:00:07.0",
  rx_queues = 0x7f577fb3ed00,
  tx_queues = 0x7f577fcfd880,
  nb_rx_queues = 1,
  nb_tx_queues = 3,
  sriov = {
    active = 0 '\000',
    nb_q_per_pool = 0 '\000',
    def_vmdq_idx = 0,
    def_pool_q_idx = 0
  },
  dev_private = 0x7f577fbb2940,
  dev_link = {
    link_speed = 10000,
    link_duplex = 1,
    link_autoneg = 1,
    link_status = 1
  },
  dev_conf = {
    link_speeds = 3888,
    rxmode = {
      mq_mode = ETH_MQ_RX_NONE,
      max_rx_pkt_len = 9220,
      max_lro_pkt_size = 0,
      split_hdr_size = 0,
      offloads = 2048,
      reserved_64s = {0, 0},
      reserved_ptrs = {0x0, 0x0}
    },
    txmode = {
      mq_mode = ETH_MQ_TX_NONE,
      offloads = 32780,
      pvid = 0,
      hw_vlan_reject_tagged = 0 '\000',
      hw_vlan_reject_untagged = 0 '\000',
      hw_vlan_insert_pvid = 0 '\000',
      reserved_64s = {0, 0},
      reserved_ptrs = {0x0, 0x0}
    },
    lpbk_mode = 0,
    rx_adv_conf = {
      rss_conf = {
        rss_key = 0x0,
        rss_key_len = 0 '\000',
        rss_hf = 0
      },
      vmdq_dcb_conf = {
        nb_queue_pools = (unknown: 0),
        enable_default_pool = 0 '\000',
        default_pool = 0 '\000',
        nb_pool_maps = 0 '\000',
        pool_map = {{
            vlan_id = 0,
            pools = 0
          } <repeats 64 times>},
        dcb_tc = ""
      },
      dcb_rx_conf = {
        nb_tcs = (unknown: 0),
        dcb_tc = ""
      },
      vmdq_rx_conf = {
        nb_queue_pools = (unknown: 0),
        enable_default_pool = 0 '\000',
        default_pool = 0 '\000',
        enable_loop_back = 0 '\000',
        nb_pool_maps = 0 '\000',
        rx_mode = 0,
        pool_map = {{
            vlan_id = 0,
            pools = 0
          } <repeats 64 times>}
      }
    },
    tx_adv_conf = {
      vmdq_dcb_tx_conf = {
        nb_queue_pools = (unknown: 0),
        dcb_tc = ""
      },
      dcb_tx_conf = {
        nb_tcs = (unknown: 0),
        dcb_tc = ""
      },
      vmdq_tx_conf = {
        nb_queue_pools = (unknown: 0)
      }
    },
    dcb_capability_en = 0,
    fdir_conf = {
      mode = RTE_FDIR_MODE_NONE,
      pballoc = RTE_FDIR_PBALLOC_64K,
      status = RTE_FDIR_NO_REPORT_STATUS,
      drop_queue = 0 '\000',
      mask = {
        vlan_tci_mask = 0,
        ipv4_mask = {
          src_ip = 0,
          dst_ip = 0,
          tos = 0 '\000',
          ttl = 0 '\000',
          proto = 0 '\000'
        },
        ipv6_mask = {
          src_ip = {0, 0, 0, 0},
          dst_ip = {0, 0, 0, 0},
          tc = 0 '\000',
          proto = 0 '\000',
          hop_limits = 0 '\000'
        },
        src_port_mask = 0,
        dst_port_mask = 0,
        mac_addr_byte_mask = 0 '\000',
        tunnel_id_mask = 0,
        tunnel_type_mask = 0 '\000'
      },
      flex_conf = {
        nb_payloads = 0,
        nb_flexmasks = 0,
        flex_set = {{
            type = RTE_ETH_PAYLOAD_UNKNOWN,
            src_offset = {0 <repeats 16 times>}
          }},
        flex_mask = {{
            flow_type = 0,
            mask = ""
          } <repeats 24 times>}
      }
    },
    intr_conf = {
      lsc = 0,
      rxq = 0,
      rmv = 0
    }
  },
  mtu = 1500,
  min_rx_buf_size = 2176,
  rx_mbuf_alloc_failed = 0,
  mac_addrs = 0x7f577fb30500,
  mac_pool_sel = {0 <repeats 128 times>},
  hash_mac_addrs = 0x0,
  port_id = 2,
  promiscuous = 1 '\001',
  scattered_rx = 1 '\001',
  all_multicast = 1 '\001',
  dev_started = 1 '\001',
  lro = 0 '\000',
  rx_queue_state = "\001",
  tx_queue_state = "\001\001\001",
  dev_flags = 3,
  kdrv = RTE_KDRV_IGB_UIO,
  numa_node = 0,
  vlan_filter_conf = {
    ids = {0 <repeats 64 times>}
  },
  owner = {
    id = 0,
    name = ""
  },
  representor_id = 0,
  reserved_64s = {0, 0, 0, 0},
  reserved_ptrs = {0x0, 0x0, 0x0, 0x0}
}

References:
1. Linux driver code examples explained
2. DPDK UIO driver analysis
3. Linux kernel UIO sources:
   https://elixir.bootlin.com/linux/v3.10.108/source/include/linux/uio_driver.h#L101
   https://elixir.bootlin.com/linux/v3.10.108/source/drivers/uio/uio.c#L812
4. Linux UIO driver basic concepts
5. DPDK i40e driver files explained
