// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>

static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	int ret = 0;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static int vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);

	return 0;
}

static struct bus_type vdpa_bus = {
	.name = "vdpa",
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_simple_remove(&vdpa_index_ida, vdev->index);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Returns an error when parent/config/dma_dev is not set or the ida
 * allocation fails.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_simple_remove(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
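
/*
 * Usage sketch (illustration only, not part of this file): a hypothetical
 * parent driver embeds struct vdpa_device as the first member of its own
 * state and allocates it through the vdpa_alloc_device() wrapper from
 * <linux/vdpa.h>, which passes the size of the containing structure down to
 * __vdpa_alloc_device(). The names my_vdpa, my_config_ops and my_probe()
 * below are made up for the example.
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;	// must come first
 *		void __iomem *regs;		// driver private state
 *	};
 *
 *	static int my_probe(struct device *parent)
 *	{
 *		struct my_vdpa *mv;
 *
 *		mv = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *				       &my_config_ops, NULL);
 *		if (IS_ERR(mv))
 *			return PTR_ERR(mv);
 *		// set up the hardware, then register on the vDPA bus
 *		return 0;
 *	}
 *
 * After a successful allocation, the final put_device() on the embedded
 * struct device releases it through vdpa_release_dev() above.
 */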

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * The caller must have made a successful call to vdpa_alloc_device() before.
 * The caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Returns an error when the device cannot be added to the vDPA bus.
 */
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Returns an error when the device cannot be added to the vDPA bus.
 */
int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
{
	int err;

	mutex_lock(&vdpa_dev_mutex);
	err = __vdpa_register_device(vdev, nvqs);
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);

/**
 * _vdpa_unregister_device - unregister a vDPA device with the vdpa lock held
 * The caller must invoke this routine as part of the management device
 * dev_del() callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_mutex);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	mutex_lock(&vdpa_dev_mutex);
	device_unregister(&vdev->dev);
	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);
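
/*
 * Usage sketch (illustration only): a parent driver that does not expose a
 * management device registers the allocated device directly; nvqs is the
 * number of virtqueues the hardware provides. The names mv (a struct my_vdpa
 * from the sketch above) and MY_VQ_NUM are hypothetical.
 *
 *	err = vdpa_register_device(&mv->vdpa, MY_VQ_NUM);
 *	if (err) {
 *		put_device(&mv->vdpa.dev);	// undoes vdpa_alloc_device()
 *		return err;
 *	}
 *	...
 *	vdpa_unregister_device(&mv->vdpa);	// on teardown
 *
 * A device created on behalf of a management device must instead be
 * registered with _vdpa_register_device() from the dev_add() callback,
 * which already runs under vdpa_dev_mutex (see vdpa_nl_cmd_dev_add_set_doit()
 * below).
 */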

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Returns an error when the registration fails.
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
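
/*
 * Usage sketch (illustration only): a minimal vDPA bus driver as it would be
 * bound by vdpa_dev_probe()/vdpa_dev_remove() above. The shape follows
 * existing drivers such as virtio_vdpa; my_vdpa_drv, my_drv_probe() and
 * my_drv_remove() are hypothetical names.
 *
 *	static int my_drv_probe(struct vdpa_device *vdev)
 *	{
 *		// claim the device, set up virtqueue handling, etc.
 *		return 0;
 *	}
 *
 *	static void my_drv_remove(struct vdpa_device *vdev)
 *	{
 *		// undo everything done in my_drv_probe()
 *	}
 *
 *	static struct vdpa_driver my_vdpa_drv = {
 *		.driver = {
 *			.name = "my_vdpa_drv",
 *		},
 *		.probe = my_drv_probe,
 *		.remove = my_drv_remove,
 *	};
 *
 *	module_vdpa_driver(my_vdpa_drv);
 *
 * The vdpa_register_driver() wrapper (and the module_vdpa_driver() helper,
 * where available) ends up in __vdpa_register_driver() above with
 * THIS_MODULE as the owner.
 */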

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to the vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	mutex_lock(&vdpa_dev_mutex);
	list_add_tail(&mdev->list, &mdev_head);
	mutex_unlock(&vdpa_dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	mutex_lock(&vdpa_dev_mutex);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	mutex_unlock(&vdpa_dev_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
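
/*
 * Usage sketch (illustration only): a management device supplies dev_add()
 * and dev_del() ops so devices can be created and destroyed over the vdpa
 * netlink interface handled below. The names reuse the hypothetical my_vdpa
 * and my_config_ops from the sketches above; my_mgmt_ops, my_id_table and
 * my_mgmt_dev are also made up.
 *
 *	static int my_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
 *	{
 *		struct my_vdpa *mv;
 *		int err;
 *
 *		mv = vdpa_alloc_device(struct my_vdpa, vdpa, mdev->device,
 *				       &my_config_ops, name);
 *		if (IS_ERR(mv))
 *			return PTR_ERR(mv);
 *		mv->vdpa.mdev = mdev;	// required by _vdpa_register_device()
 *		err = _vdpa_register_device(&mv->vdpa, MY_VQ_NUM);
 *		if (err)
 *			put_device(&mv->vdpa.dev);
 *		return err;
 *	}
 *
 *	static void my_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
 *	{
 *		_vdpa_unregister_device(dev);
 *	}
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmt_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 *
 *	static struct vdpa_mgmt_dev my_mgmt_dev = {
 *		.device = &my_parent_device,	// e.g. &pdev->dev
 *		.id_table = my_id_table,
 *		.ops = &my_mgmt_ops,
 *	};
 *
 *	err = vdpa_mgmtdev_register(&my_mgmt_dev);
 */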

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* The bus name is optional for a simulated management device, so only
	 * match a device that has a bus when a bus attribute is provided,
	 * and vice versa.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		supported_classes |= BIT(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}
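
/*
 * Example (illustration only): the supported class bitmap reported above is
 * derived from the management device's id_table of virtio device IDs. A
 * hypothetical networking management device could declare something like:
 *
 *	static struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },				// terminating entry
 *	};
 *
 * With VIRTIO_ID_NET == 1, the loop in vdpa_mgmtdev_fill() sets BIT(1) in
 * VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, which userspace (e.g. the iproute2
 * "vdpa mgmtdev show" command) decodes back into a class name.
 */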

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		mutex_unlock(&vdpa_dev_mutex);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	mutex_lock(&vdpa_dev_mutex);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = idx;
	return msg->len;
}

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name);
err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	mutex_unlock(&vdpa_dev_mutex);
	return err;
}

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	mutex_lock(&vdpa_dev_mutex);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);
mdev_err:
	put_device(dev);
err:
	mutex_unlock(&vdpa_dev_mutex);
	if (err)
		nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	mutex_lock(&vdpa_dev_mutex);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	mutex_unlock(&vdpa_dev_mutex);
	cb->args[0] = info.idx;
	return msg->len;
}

static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
};

static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
};

static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}

static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");