// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>

static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	int ret = 0;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static struct bus_type vdpa_bus = {
	.name = "vdpa",
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_simple_remove(&vdpa_index_ida, vdev->index);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * calling this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or ida
 * allocation fails.
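 *
 * An illustrative sketch of allocation through the vdpa_alloc_device()
 * wrapper from <linux/vdpa.h>; my_vdpa_dev, my_config_ops and the device
 * name below are hypothetical driver-side names, not part of this API,
 * and the embedded struct vdpa_device is expected to be the first member:
 *
 *	struct my_vdpa_dev {
 *		struct vdpa_device vdpa;
 *	};
 *
 *	struct my_vdpa_dev *my;
 *
 *	my = vdpa_alloc_device(struct my_vdpa_dev, vdpa, parent,
 *			       &my_config_ops, "my-vdpa-0");
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);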
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	/* dma_map and dma_unmap must either both be provided or both be absent. */
	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * The caller must have made a successful call to vdpa_alloc_device() beforehand.
 * The caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() beforehand.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	int err;

	mutex_lock(&vdpa_dev_mutex);
	err = __vdpa_register_device(vdev, nvqs);
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of the management device dev_del()
 * callback.
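 *
 * An illustrative dev_del() sketch; my_mgmtdev_dev_del is a hypothetical
 * management device callback, not something defined in this file:
 *
 *	static void my_mgmtdev_dev_del(struct vdpa_mgmt_dev *mdev,
 *				       struct vdpa_device *dev)
 *	{
 *		_vdpa_unregister_device(dev);
 *	}
 *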
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_mutex);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	mutex_lock(&vdpa_dev_mutex);
	device_unregister(&vdev->dev);
	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when the registration fails
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 * are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	mutex_lock(&vdpa_dev_mutex);
	list_add_tail(&mdev->list, &mdev_head);
	mutex_unlock(&vdpa_dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	mutex_lock(&vdpa_dev_mutex);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* Bus name is optional for a simulated management device, so a
	 * handle only matches when the bus attribute and the device's bus
	 * are either both present or both absent.
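	 * For example, a PCI backed management device would be matched by a
	 * bus name such as "pci" together with its device name, while a
	 * bus-less simulated device (e.g. "vdpasim_net") is matched by its
	 * device name alone; these names are only illustrative.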
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		supported_classes |= BIT(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		mutex_unlock(&vdpa_dev_mutex);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	mutex_lock(&vdpa_dev_mutex);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = idx;
	return msg->len;
}
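
/*
 * The VDPA_CMD_DEV_NEW/VDPA_CMD_DEV_DEL handlers below dispatch to the
 * matched management device's dev_add()/dev_del() ops.  A minimal sketch of
 * the state such a management device registers beforehand; my_id_table,
 * my_mgmtdev_ops, my_mgmt_dev and my_parent_dev are hypothetical names used
 * only for illustration:
 *
 *	static struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
 *		.dev_add = my_mgmtdev_dev_add,
 *		.dev_del = my_mgmtdev_dev_del,
 *	};
 *
 *	static struct vdpa_mgmt_dev my_mgmt_dev = {
 *		.device = &my_parent_dev,
 *		.id_table = my_id_table,
 *		.ops = &my_mgmtdev_ops,
 *	};
 *
 *	err = vdpa_mgmtdev_register(&my_mgmt_dev);
 */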

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name);
err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);
mdev_err:
	put_device(dev);
err:
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	mutex_lock(&vdpa_dev_mutex);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = info.idx;
	return msg->len;
}

static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
};

static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};

static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}

static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");