熟悉Linux内核的同学都知道,设备和驱动都是挂在总线上的,一个驱动可以匹配多个设备,设备和驱动一旦匹配之后,就会调用驱动的probe函数对设备进行初始化。DPDK全称叫数据平面开发套件,它运行在用户态,里面也包含着许多设备驱动。那么DPDK中的总线、设备和驱动,又是使用什么模型呢?直截了当地说,DPDK中总线、设备和驱动的模型和Linux内核是一样的,加载方式也是类似的。下面主要介绍DPDK的总线注册、驱动注册和设备扫描挂载,以及设备和驱动匹配之后的设备探测。
总线注册
DPDK中有一个全局链表rte_bus_list,该链表默认初始化成空链表,结构和声明如下。
-
lib\eal\include\bus_driver.h
-
struct rte_bus_list {
-
struct rte_bus *tqh_first; /* first element */
-
struct rte_bus **tqh_last; /* addr of last next element */
-
TRACEBUF
-
}
-
-
lib\eal\common\eal_common_bus.c
-
static struct rte_bus_list rte_bus_list =
-
TAILQ_HEAD_INITIALIZER(rte_bus_list);
DPDK-22.11中支持pci、vdev、vmbus和auxiliary等8种总线。
这些总线都是通过RTE_REGISTER_BUS宏函数,将对应的bus插入到rte_bus_list链表中,相关定义如下所示。
-
lib\eal\common\eal_common_bus.c
-
void
-
rte_bus_register(struct rte_bus *bus)
-
{
-
RTE_VERIFY(bus);
-
RTE_VERIFY(rte_bus_name(bus) && strlen(rte_bus_name(bus)));
-
/* A bus should mandatorily have the scan implemented */
-
RTE_VERIFY(bus->scan);
-
RTE_VERIFY(bus->probe);
-
RTE_VERIFY(bus->find_device);
-
/* Buses supporting driver plug also require unplug. */
-
RTE_VERIFY(!bus->plug || bus->unplug);
-
-
TAILQ_INSERT_TAIL(&rte_bus_list, bus, next);
-
RTE_LOG(DEBUG, EAL, "Registered [%s] bus.\n", rte_bus_name(bus));
-
}
-
-
lib\eal\include\bus_driver.h
-
#define RTE_REGISTER_BUS(nm, bus) \
-
RTE_INIT_PRIO(businitfn_ ##nm, BUS) \
-
{\
-
(bus).name = RTE_STR(nm);\
-
rte_bus_register(&bus); \
-
}
RTE_REGISTER_BUS中nm表示bus的名字,bus表示通用的struct rte_bus,该结构体定义如下:
-
struct rte_bus {
-
RTE_TAILQ_ENTRY(rte_bus) next; /**< Next bus object in linked list */
-
const char *name; /**< Name of the bus */
-
rte_bus_scan_t scan; /**< Scan for devices attached to bus */
-
rte_bus_probe_t probe; /**< Probe devices on bus */
-
rte_bus_find_device_t find_device; /**< Find a device on the bus */
-
rte_bus_plug_t plug; /**< Probe single device for drivers */
-
rte_bus_unplug_t unplug; /**< Remove single device from driver */
-
rte_bus_parse_t parse; /**< Parse a device name */
-
rte_bus_devargs_parse_t devargs_parse; /**< Parse bus devargs */
-
rte_dev_dma_map_t dma_map; /**< DMA map for device in the bus */
-
rte_dev_dma_unmap_t dma_unmap; /**< DMA unmap for device in the bus */
-
struct rte_bus_conf conf; /**< Bus configuration */
-
rte_bus_get_iommu_class_t get_iommu_class; /**< Get iommu class */
-
rte_dev_iterate_t dev_iterate; /**< Device iterator. */
-
rte_bus_hot_unplug_handler_t hot_unplug_handler;
-
/**< handle hot-unplug failure on the bus */
-
rte_bus_sigbus_handler_t sigbus_handler;
-
/**< handle sigbus error on the bus */
-
rte_bus_cleanup_t cleanup; /**< Cleanup devices on bus */
-
};
该结构是对所有总线的一个抽象,但并不是各类总线都需要支持所有的钩子。不过所有总线都会实现scan、probe和find_device钩子,如rte_bus_register()接口校验所示。对某类总线的抽象,除了rte_bus对象之外,一般都还会维护一个设备列表和驱动列表,如rte_pci_bus的结构如下:
-
struct rte_pci_bus {
-
struct rte_bus bus; /**< Inherit the generic class */
-
RTE_TAILQ_HEAD(, rte_pci_device) device_list; /**< List of PCI devices */
-
RTE_TAILQ_HEAD(, rte_pci_driver) driver_list; /**< List of PCI drivers */
-
};
-
-
struct rte_pci_bus rte_pci_bus = {
-
.bus = {
-
.scan = rte_pci_scan,
-
.probe = pci_probe,
-
.cleanup = pci_cleanup,
-
.find_device = pci_find_device,
-
.plug = pci_plug,
-
.unplug = pci_unplug,
-
.parse = pci_parse,
-
.devargs_parse = rte_pci_devargs_parse,
-
.dma_map = pci_dma_map,
-
.dma_unmap = pci_dma_unmap,
-
.get_iommu_class = rte_pci_get_iommu_class,
-
.dev_iterate = rte_pci_dev_iterate,
-
.hot_unplug_handler = pci_hot_unplug_handler,
-
.sigbus_handler = pci_sigbus_handler,
-
},
-
.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
-
.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
-
};
-
-
RTE_REGISTER_BUS(pci, rte_pci_bus.bus);
另外,还需说明的是,各类总线的注册不是在rte_eal_init()中完成的,而是在执行DPDK主函数(main函数)前就注册好的。
驱动注册
DPDK中的设备驱动和Linux内核设备驱动也是类似的。各种总线定义了该总线上注册驱动的接口,如下图所示:
每个总线上的设备驱动,一般会在驱动主文件的末尾调用对应的钩子注册驱动,如vdev总线上的bonding和failsafe驱动,注册代码实现如下。
-
drivers\net\bonding\rte_eth_bond_pmd.c
-
struct rte_vdev_driver pmd_bond_drv = {
-
.probe = bond_probe,
-
.remove = bond_remove,
-
};
-
RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
-
-
drivers\net\failsafe\failsafe.c
-
static struct rte_vdev_driver failsafe_drv = {
-
.probe = rte_pmd_failsafe_probe,
-
.remove = rte_pmd_failsafe_remove,
-
};
-
-
RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
驱动的注册实现基本都是类似的,下面以鲲鹏920系列处理器上的内置PCI网卡驱动为例来说明。hns3驱动的rte_pci_driver中定义了id_table,其中每种硬件网卡设备的vendor_id和device_id各不相同,使得一个驱动可以支持多种设备。
-
static const struct rte_pci_id pci_id_hns3_map[] = {
-
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_GE) },
-
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE) },
-
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_25GE_RDMA) },
-
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) },
-
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) },
-
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) },
-
{ .vendor_id = 0, }, /* sentinel */
-
};
-
-
static struct rte_pci_driver rte_hns3_pmd = {
-
.id_table = pci_id_hns3_map,
-
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-
.probe = eth_hns3_pci_probe,
-
.remove = eth_hns3_pci_remove,
-
};
-
-
RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
rte_hns3_pmd是通过RTE_PMD_REGISTER_PCI()接口注册,并将rte_hns3_pmd驱动挂接到rte_pci_bus下的驱动列表上。设备驱动的注册里面也是使用RTE_INIT宏定义constructor属性函数,其执行动作也是在DPDK的主函数之前就完成的,但其设置的优先级比总线注册的优先级低。因此,驱动注册完成于主函数执行前,总线注册之后。
设备挂载
设备挂载很多博文中都有介绍,但随着DPDK版本演进,中间过程有变化。本节主要基于刚发布的DPDK-22.11版本来进行介绍。设备扫描挂载是在主函数中调用的rte_eal_init()中完成的。rte_eal_init()会调用rte_bus_scan(),该函数实现如下:
-
int
-
rte_bus_scan(void)
-
{
-
int ret;
-
struct rte_bus *bus = NULL;
-
-
TAILQ_FOREACH(bus, &rte_bus_list, next) {
-
ret = bus->scan();
-
if (ret)
-
RTE_LOG(ERR, EAL, "Scan for (%s) bus failed.\n",
-
rte_bus_name(bus));
-
}
-
-
return 0;
-
}
从该函数实现可以看出,eal层初始化时,会遍历全局总线链表,调用总线上的scan()接口扫描总线上的设备。下面以rte_pci_bus上的扫描过程为例进行介绍,该总线的scan()接口为rte_pci_scan(),该函数实现如下:
-
int
-
rte_pci_scan(void)
-
{
-
struct dirent *e;
-
DIR *dir;
-
char dirname[PATH_MAX];
-
struct rte_pci_addr addr;
-
-
/* for debug purposes, PCI can be disabled */
-
if (!rte_eal_has_pci())
-
return 0;
-
-
/* 打开/sys/bus/pci/devices/目录 */
-
dir = opendir(rte_pci_get_sysfs_path());
-
if (dir == NULL) {
-
RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
-
__func__, strerror(errno));
-
return -1;
-
}
-
-
/* 读取目录下所有的子目录,每个子目录为一个pci设备 */
-
while ((e = readdir(dir)) != NULL) {
-
if (e->d_name[0] == '.')
-
continue;
-
-
/* 根据目录名,得到rte_pci_addr中的domain、bus、devid和function */
-
if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
-
continue;
-
-
/*
-
* 如果设备不在白名单中则不会扫描,通过-b/-a控制,
-
* 如果都不指定则扫描被uio驱动接管的所有PCI设备。
-
*/
-
if (rte_pci_ignore_device(&addr))
-
continue;
-
-
snprintf(dirname, sizeof(dirname), "%s/%s",
-
rte_pci_get_sysfs_path(), e->d_name);
-
-
/* 扫描子目录文件夹 */
-
if (pci_scan_one(dirname, &addr) < 0)
-
goto error;
-
}
-
closedir(dir);
-
return 0;
-
-
error:
-
closedir(dir);
-
return -1;
-
}
/sys/bus/pci/devices/目录中每个子目录文件如下图所示。注意该文件夹的顺序是随着BDF号(pci设备地址)的顺序从小到大依次排列的。
每个子设备文件夹下的文件包含了该设备下的许多信息,如下图所示。
而rte_pci_device的结构域段就是和上图中的文件是类似的,如下图所示:
pci_scan_one()函数就是解析设备文件夹中的每个文件信息,从而初始化申请的rte_pci_device,并将其挂载到rte_pci_bus总线上的设备列表上,具体解析代码实现细节如下:
-
static int
-
pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
-
{
-
char filename[PATH_MAX];
-
unsigned long tmp;
-
struct rte_pci_device *dev;
-
char driver[PATH_MAX];
-
int ret;
-
-
dev = malloc(sizeof(*dev));
-
if (dev == NULL)
-
return -1;
-
-
memset(dev, 0, sizeof(*dev));
-
dev->device.bus = &rte_pci_bus.bus;
-
dev->addr = *addr;
-
-
/* get vendor id */
-
snprintf(filename, sizeof(filename), "%s/vendor", dirname);
-
if (eal_parse_sysfs_value(filename, &tmp) < 0) {
-
pci_free(dev);
-
return -1;
-
}
-
dev->id.vendor_id = (uint16_t)tmp;
-
-
/* get device id */
-
snprintf(filename, sizeof(filename), "%s/device", dirname);
-
if (eal_parse_sysfs_value(filename, &tmp) < 0) {
-
pci_free(dev);
-
return -1;
-
}
-
dev->id.device_id = (uint16_t)tmp;
-
-
/* get subsystem_vendor id */
-
snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
-
dirname);
-
if (eal_parse_sysfs_value(filename, &tmp) < 0) {
-
pci_free(dev);
-
return -1;
-
}
-
dev->id.subsystem_vendor_id = (uint16_t)tmp;
-
-
/* get subsystem_device id */
-
snprintf(filename, sizeof(filename), "%s/subsystem_device",
-
dirname);
-
if (eal_parse_sysfs_value(filename, &tmp) < 0) {
-
pci_free(dev);
-
return -1;
-
}
-
dev->id.subsystem_device_id = (uint16_t)tmp;
-
-
/* get class_id */
-
snprintf(filename, sizeof(filename), "%s/class",
-
dirname);
-
if (eal_parse_sysfs_value(filename, &tmp) < 0) {
-
pci_free(dev);
-
return -1;
-
}
-
/* the least 24 bits are valid: class, subclass, program interface */
-
dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
-
-
/* get max_vfs */
-
dev->max_vfs = 0;
-
snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
-
if (!access(filename, F_OK) &&
-
eal_parse_sysfs_value(filename, &tmp) == 0)
-
dev->max_vfs = (uint16_t)tmp;
-
else {
-
/* for non igb_uio driver, need kernel version >= 3.8 */
-
snprintf(filename, sizeof(filename),
-
"%s/sriov_numvfs", dirname);
-
if (!access(filename, F_OK) &&
-
eal_parse_sysfs_value(filename, &tmp) == 0)
-
dev->max_vfs = (uint16_t)tmp;
-
}
-
-
/* get numa node, default to 0 if not present */
-
snprintf(filename, sizeof(filename), "%s/numa_node", dirname);
-
-
if (access(filename, F_OK) == 0 &&
-
eal_parse_sysfs_value(filename, &tmp) == 0)
-
dev->device.numa_node = tmp;
-
else
-
dev->device.numa_node = SOCKET_ID_ANY;
-
-
pci_common_set(dev);
-
-
/* parse resources */
-
snprintf(filename, sizeof(filename), "%s/resource", dirname);
-
if (pci_parse_sysfs_resource(filename, dev) < 0) {
-
RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
-
pci_free(dev);
-
return -1;
-
}
-
-
/* parse driver */
-
snprintf(filename, sizeof(filename), "%s/driver", dirname);
-
ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
-
if (ret < 0) {
-
RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
-
pci_free(dev);
-
return -1;
-
}
-
-
if (!ret) {
-
if (!strcmp(driver, "vfio-pci"))
-
dev->kdrv = RTE_PCI_KDRV_VFIO;
-
else if (!strcmp(driver, "igb_uio"))
-
dev->kdrv = RTE_PCI_KDRV_IGB_UIO;
-
else if (!strcmp(driver, "uio_pci_generic"))
-
dev->kdrv = RTE_PCI_KDRV_UIO_GENERIC;
-
else
-
dev->kdrv = RTE_PCI_KDRV_UNKNOWN;
-
} else {
-
pci_free(dev);
-
return 0;
-
}
-
/* device is valid, add in list (sorted) */
-
if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
-
/* 总线设备列表为空,直接将设备添加到总线上 */
-
rte_pci_add_device(dev);
-
} else {
-
struct rte_pci_device *dev2;
-
int ret;
-
-
TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
-
ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
-
if (ret > 0)
-
continue;
-
-
if (ret < 0) {
-
rte_pci_insert_device(dev2, dev);
-
} else { /* already registered */
-
if (!rte_dev_is_probed(&dev2->device)) {
-
dev2->kdrv = dev->kdrv;
-
dev2->max_vfs = dev->max_vfs;
-
dev2->id = dev->id;
-
pci_common_set(dev2);
-
memmove(dev2->mem_resource,
-
dev->mem_resource,
-
sizeof(dev->mem_resource));
-
} else {
-
/**
-
* If device is plugged and driver is
-
* probed already, (This happens when
-
* we call rte_dev_probe which will
-
* scan all device on the bus) we don't
-
* need to do anything here unless...
-
**/
-
if (dev2->kdrv != dev->kdrv ||
-
dev2->max_vfs != dev->max_vfs ||
-
memcmp(&dev2->id, &dev->id, sizeof(dev2->id)))
-
/*
-
* This should not happens.
-
* But it is still possible if
-
* we unbind a device from
-
* vfio or uio before hotplug
-
* remove and rebind it with
-
* a different configure.
-
* So we just print out the
-
* error as an alarm.
-
*/
-
RTE_LOG(ERR, EAL, "Unexpected device scan at %s!\n",
-
filename);
-
else if (dev2->device.devargs !=
-
dev->device.devargs) {
-
rte_devargs_remove(dev2->device.devargs);
-
pci_common_set(dev2);
-
}
-
}
-
pci_free(dev);
-
}
-
return 0;
-
}
-
/* rte_pci_device初始化完成后,将设备添加到总线上 */
-
rte_pci_add_device(dev);
-
}
-
-
return 0;
-
}
以上过程的具体细节在此不进行展开,主要提一下以下几点:1)driver指定了当前设备的驱动,DPDK支持三种托管设备到用户态的驱动,即vfio-pci、igb_uio和uio_pci_generic;2) struct rte_pci_id中的信息,后面会用它和驱动的id_table进行设备和驱动的匹配;3) resource文件,该文件保存了PCI设备的Bar0-5的地址空间,其在pci_parse_sysfs_resource函数中被解析并保存到rte_pci_device中的mem_resource[]中。综上可以看出,PCI设备就是按照以上方式、按BDF号从小到大的顺序挂载到rte_pci_bus总线上的。
设备探测
设备扫描完成后,在rte_eal_init()中的后面就调用rte_bus_probe()函数,遍历所有总线,调用总线的probe函数,代码如下:
-
int
-
rte_bus_probe(void)
-
{
-
int ret;
-
struct rte_bus *bus, *vbus = NULL;
-
-
TAILQ_FOREACH(bus, &rte_bus_list, next) {
-
if (!strcmp(rte_bus_name(bus), "vdev")) {
-
vbus = bus;
-
continue;
-
}
-
-
ret = bus->probe();
-
if (ret)
-
RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
-
rte_bus_name(bus));
-
}
-
-
if (vbus) {
-
ret = vbus->probe();
-
if (ret)
-
RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
-
rte_bus_name(vbus));
-
}
-
-
return 0;
-
}
下面仍然以rte_pci_bus总线为例说明pci设备的探测过程。pci_probe()会按照PCI设备挂载顺序为该总线上的每个设备遍历总线上的所有驱动找到对应的驱动,代码实现如下:
-
static int
-
pci_probe(void)
-
{
-
struct rte_pci_device *dev = NULL;
-
size_t probed = 0, failed = 0;
-
int ret = 0;
-
-
FOREACH_DEVICE_ON_PCIBUS(dev) {
-
probed++;
-
/* 用该设备与所有驱动进行匹配 */
-
ret = pci_probe_all_drivers(dev);
-
if (ret < 0) {
-
if (ret != -EEXIST) {
-
RTE_LOG(ERR, EAL, "Requested device "
-
PCI_PRI_FMT " cannot be used\n",
-
dev->addr.domain, dev->addr.bus,
-
dev->addr.devid, dev->addr.function);
-
rte_errno = errno;
-
failed++;
-
}
-
ret = 0;
-
}
-
}
-
-
return (probed && probed == failed) ? -1 : 0;
-
}
pci_probe_all_drivers中就会去遍历rte_pci_bus上的所有驱动,并调用rte_pci_probe_one_driver(dr, dev)进行设备和驱动的匹配。rte_pci_probe_one_driver的匹配细节如下代码:
-
static int
-
rte_pci_probe_one_driver(struct rte_pci_driver *dr,
-
struct rte_pci_device *dev)
-
{
-
int ret;
-
bool already_probed;
-
struct rte_pci_addr *loc;
-
-
if ((dr == NULL) || (dev == NULL))
-
return -EINVAL;
-
-
loc = &dev->addr;
-
-
/* 使用rte_pci_device中的rte_pci_id的vendor_id,device_id,
-
* subsystem_vendor_id,subsystem_device_id和class_id去和
-
* rte_pci_driver中的id table数组的每一个rte_pci_id进行匹配。
-
* 不匹配则直接返回 */
-
if (!rte_pci_match(dr, dev))
-
/* Match of device and driver failed */
-
return 1;
-
-
RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
-
loc->domain, loc->bus, loc->devid, loc->function,
-
dev->device.numa_node);
-
-
/* 设备被指定了-b,则返回,不对其进行初始化 */
-
if (dev->device.devargs != NULL &&
-
dev->device.devargs->policy == RTE_DEV_BLOCKED) {
-
RTE_LOG(INFO, EAL, " Device is blocked, not initializing\n");
-
return 1;
-
}
-
-
if (dev->device.numa_node < 0 && rte_socket_count() > 1)
-
RTE_LOG(INFO, EAL, "Device %s is not NUMA-aware\n", dev->name);
-
-
/* 检测设备是否已被探测过,如已被探测,而又不支持重复,则返回失败 */
-
already_probed = rte_dev_is_probed(&dev->device);
-
if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
-
RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",
-
dev->device.name);
-
return -EEXIST;
-
}
-
-
RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
-
dev->id.device_id, dr->driver.name);
-
-
if (!already_probed) {
-
enum rte_iova_mode dev_iova_mode;
-
enum rte_iova_mode iova_mode;
-
/* 通过使用的何种uio驱动,得到是iova还是pa模式 */
-
dev_iova_mode = pci_device_iova_mode(dr, dev);
-
iova_mode = rte_eal_iova_mode();
-
if (dev_iova_mode != RTE_IOVA_DC &&
-
dev_iova_mode != iova_mode) {
-
RTE_LOG(ERR, EAL, " Expecting '%s' IOVA mode but current mode is '%s', not initializing\n",
-
dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA",
-
iova_mode == RTE_IOVA_PA ? "PA" : "VA");
-
return -EINVAL;
-
}
-
-
/* Allocate interrupt instance for pci device */
-
dev->intr_handle =
-
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
-
if (dev->intr_handle == NULL) {
-
RTE_LOG(ERR, EAL,
-
"Failed to create interrupt instance for %s\n",
-
dev->device.name);
-
return -ENOMEM;
-
}
-
-
dev->vfio_req_intr_handle =
-
rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
-
if (dev->vfio_req_intr_handle == NULL) {
-
rte_intr_instance_free(dev->intr_handle);
-
dev->intr_handle = NULL;
-
RTE_LOG(ERR, EAL,
-
"Failed to create vfio req interrupt instance for %s\n",
-
dev->device.name);
-
return -ENOMEM;
-
}
-
-
/*
-
* Reference driver structure.
-
* This needs to be before rte_pci_map_device(), as it enables
-
* to use driver flags for adjusting configuration.
-
*/
-
dev->driver = dr;
-
/*
-
* 驱动的drv_flag中有NEED_MAPPING才需要mapping设备的Bar空间。
-
* 一般是使用uio驱动托管设备到用户态的才需要用,
-
* 像dpaa2和mlx的驱动都不是通过这种方式托管的。
-
*/
-
if (dev->driver->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
-
/*
-
* 该接口就会通过对应的uio驱动,mapping设备Bar空间得到Bar的
-
* 虚拟地址,保存在dev->mem_resource[i].addr中
-
*/
-
ret = rte_pci_map_device(dev);
-
if (ret != 0) {
-
dev->driver = NULL;
-
rte_intr_instance_free(dev->vfio_req_intr_handle);
-
dev->vfio_req_intr_handle = NULL;
-
rte_intr_instance_free(dev->intr_handle);
-
dev->intr_handle = NULL;
-
return ret;
-
}
-
}
-
}
-
-
RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n",
-
dr->driver.name, dev->id.vendor_id, dev->id.device_id,
-
loc->domain, loc->bus, loc->devid, loc->function,
-
dev->device.numa_node);
-
/* 调用驱动的probe()函数 */
-
ret = dr->probe(dr, dev);
-
if (already_probed)
-
return ret; /* no rollback if already succeeded earlier */
-
if (ret) {
-
dev->driver = NULL;
-
if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
-
/* Don't unmap if device is unsupported and
-
* driver needs mapped resources.
-
*/
-
!(ret > 0 &&
-
(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
-
rte_pci_unmap_device(dev);
-
rte_intr_instance_free(dev->vfio_req_intr_handle);
-
dev->vfio_req_intr_handle = NULL;
-
rte_intr_instance_free(dev->intr_handle);
-
dev->intr_handle = NULL;
-
} else {
-
/* 让设备的driver指针引用驱动的driver,方便获取驱动信息 */
-
dev->device.driver = &dr->driver;
-
}
-
-
return ret;
-
}
rte_pci_match(dr, dev)就使用前面设备扫描过程中得到的设备pci_id信息和driver中的id_table进行匹配,若匹配则进行PCI设备Bar空间的映射,再调用驱动的probe函数(即上面代码中的dr->probe(dr, dev))。
每个类设备驱动的probe函数执行也是类似的,下面以鲲鹏920系列处理器内置网卡hns3 PF驱动的probe过程做一个简要的介绍。hns3 PF驱动的probe函数实现如下:
-
static inline int
-
rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev,
-
size_t private_data_size, eth_dev_pci_callback_t dev_init)
-
{
-
struct rte_eth_dev *eth_dev;
-
int ret;
-
-
/* 分配一个ethdev */
-
eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size);
-
if (!eth_dev)
-
return -ENOMEM;
-
-
if (*dev_init == NULL)
-
return -EINVAL;
-
-
/* 调用驱动的dev_init函数,进行硬件设备初始化 */
-
ret = dev_init(eth_dev);
-
if (ret)
-
rte_eth_dev_release_port(eth_dev);
-
else
-
rte_eth_dev_probing_finish(eth_dev);
-
-
return ret;
-
}
-
-
static int
-
eth_hns3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-
struct rte_pci_device *pci_dev)
-
{
-
return rte_eth_dev_pci_generic_probe(pci_dev,
-
sizeof(struct hns3_adapter),
-
hns3_dev_init);
-
}
对于网卡设备,上层应用看到的就是一个以太网设备(ethdev),上层应用都是通过ethdev对应的端口号,操作PCI网卡。该probe函数执行时,首先分配一个ethdev,然后调用驱动的dev_init()函数进行设备驱动中的默认初始化。特别指出的是,在hns3_dev_init()函数中会调用hns3_init_pf(),该接口会将PCI设备的Bar2地址作为硬件设备的io地址,如下所示:
-
static int
-
hns3_init_pf(struct rte_eth_dev *eth_dev)
-
{
-
struct hns3_adapter *hns = eth_dev->data->dev_private;
-
struct hns3_hw *hw = &hns->hw;
-
-
...
-
/* Get hardware io base address from pcie BAR2 IO space */
-
hw->io_base = pci_dev->mem_resource[2].addr;
-
...
-
}
驱动通过该地址,就可在用户态直接读写设备寄存器。具体使用PCI设备的哪个Bar地址不固定,各厂商是有差异的。
驱动的probe函数执行完,则驱动和设备就探测成功了。但设备的初始化过程并没有完全完成,上层应用还需调用dev_configure接口下发配置,根据用户的配置重新配置硬件设备,再调用dev_start()接口,整个设备才全部初始化完成。
阅读(1646) | 评论(0) | 转发(0) |