1 /* 2 * Virtio memory mapped device driver 3 * 4 * Copyright 2011-2014, ARM Ltd. 5 * 6 * This module allows virtio devices to be used over a virtual, memory mapped 7 * platform device. 8 * 9 * The guest device(s) may be instantiated in one of three equivalent ways: 10 * 11 * 1. Static platform device in board's code, eg.: 12 * 13 * static struct platform_device v2m_virtio_device = { 14 * .name = "virtio-mmio", 15 * .id = -1, 16 * .num_resources = 2, 17 * .resource = (struct resource []) { 18 * { 19 * .start = 0x1001e000, 20 * .end = 0x1001e0ff, 21 * .flags = IORESOURCE_MEM, 22 * }, { 23 * .start = 42 + 32, 24 * .end = 42 + 32, 25 * .flags = IORESOURCE_IRQ, 26 * }, 27 * } 28 * }; 29 * 30 * 2. Device Tree node, eg.: 31 * 32 * virtio_block@1e000 { 33 * compatible = "virtio,mmio"; 34 * reg = <0x1e000 0x100>; 35 * interrupts = <42>; 36 * } 37 * 38 * 3. Kernel module (or command line) parameter. Can be used more than once - 39 * one device will be created for each one. Syntax: 40 * 41 * [virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>] 42 * where: 43 * <size> := size (can use standard suffixes like K, M or G) 44 * <baseaddr> := physical base address 45 * <irq> := interrupt number (as passed to request_irq()) 46 * <id> := (optional) platform device id 47 * eg.: 48 * virtio_mmio.device=0x100@0x100b0000:48 \ 49 * virtio_mmio.device=1K@0x1001e000:74 50 * 51 * 52 * 53 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 54 * 55 * This work is licensed under the terms of the GNU GPL, version 2 or later. 56 * See the COPYING file in the top-level directory. 57 */ 58 59 #define pr_fmt(fmt) "virtio-mmio: " fmt 60 61 #include <linux/highmem.h> 62 #include <linux/interrupt.h> 63 #include <linux/io.h> 64 #include <linux/list.h> 65 #include <linux/module.h> 66 #include <linux/platform_device.h> 67 #include <linux/slab.h> 68 #include <linux/spinlock.h> 69 #include <linux/virtio.h> 70 #include <linux/virtio_config.h> 71 #include <linux/virtio_mmio.h> 72 #include <linux/virtio_ring.h> 73 74 75 76 /* The alignment to use between consumer and producer parts of vring. 77 * Currently hardcoded to the page size. */ 78 #define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE 79 80 81 82 #define to_virtio_mmio_device(_plat_dev) \ 83 container_of(_plat_dev, struct virtio_mmio_device, vdev) 84 85 struct virtio_mmio_device { 86 struct virtio_device vdev; 87 struct platform_device *pdev; 88 89 void __iomem *base; 90 unsigned long version; 91 92 /* a list of queues so we can dispatch IRQs */ 93 spinlock_t lock; 94 struct list_head virtqueues; 95 }; 96 97 struct virtio_mmio_vq_info { 98 /* the actual virtqueue */ 99 struct virtqueue *vq; 100 101 /* the number of entries in the queue */ 102 unsigned int num; 103 104 /* the virtual address of the ring queue */ 105 void *queue; 106 107 /* the list node for the virtqueues list */ 108 struct list_head node; 109 }; 110 111 112 113 /* Configuration interface */ 114 115 static u64 vm_get_features(struct virtio_device *vdev) 116 { 117 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 118 u64 features; 119 120 writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL); 121 features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES); 122 features <<= 32; 123 124 writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL); 125 features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES); 126 127 return features; 128 } 129 130 static int vm_finalize_features(struct virtio_device *vdev) 131 { 132 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 133 134 /* Give virtio_ring a chance to accept features. */ 135 vring_transport_features(vdev); 136 137 /* Make sure there is are no mixed devices */ 138 if (vm_dev->version == 2 && 139 !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { 140 dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n"); 141 return -EINVAL; 142 } 143 144 writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL); 145 writel((u32)(vdev->features >> 32), 146 vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES); 147 148 writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL); 149 writel((u32)vdev->features, 150 vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES); 151 152 return 0; 153 } 154 155 static void vm_get(struct virtio_device *vdev, unsigned offset, 156 void *buf, unsigned len) 157 { 158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 159 u8 *ptr = buf; 160 int i; 161 162 for (i = 0; i < len; i++) 163 ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 164 } 165 166 static void vm_set(struct virtio_device *vdev, unsigned offset, 167 const void *buf, unsigned len) 168 { 169 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 170 const u8 *ptr = buf; 171 int i; 172 173 for (i = 0; i < len; i++) 174 writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 175 } 176 177 static u8 vm_get_status(struct virtio_device *vdev) 178 { 179 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 180 181 return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff; 182 } 183 184 static void vm_set_status(struct virtio_device *vdev, u8 status) 185 { 186 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 187 188 /* We should never be setting status to 0. */ 189 BUG_ON(status == 0); 190 191 writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); 192 } 193 194 static void vm_reset(struct virtio_device *vdev) 195 { 196 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 197 198 /* 0 status means a reset. */ 199 writel(0, vm_dev->base + VIRTIO_MMIO_STATUS); 200 } 201 202 203 204 /* Transport interface */ 205 206 /* the notify function used when creating a virt queue */ 207 static bool vm_notify(struct virtqueue *vq) 208 { 209 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); 210 211 /* We write the queue's selector into the notification register to 212 * signal the other end */ 213 writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); 214 return true; 215 } 216 217 /* Notify all virtqueues on an interrupt. */ 218 static irqreturn_t vm_interrupt(int irq, void *opaque) 219 { 220 struct virtio_mmio_device *vm_dev = opaque; 221 struct virtio_mmio_vq_info *info; 222 unsigned long status; 223 unsigned long flags; 224 irqreturn_t ret = IRQ_NONE; 225 226 /* Read and acknowledge interrupts */ 227 status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS); 228 writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK); 229 230 if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) { 231 virtio_config_changed(&vm_dev->vdev); 232 ret = IRQ_HANDLED; 233 } 234 235 if (likely(status & VIRTIO_MMIO_INT_VRING)) { 236 spin_lock_irqsave(&vm_dev->lock, flags); 237 list_for_each_entry(info, &vm_dev->virtqueues, node) 238 ret |= vring_interrupt(irq, info->vq); 239 spin_unlock_irqrestore(&vm_dev->lock, flags); 240 } 241 242 return ret; 243 } 244 245 246 247 static void vm_del_vq(struct virtqueue *vq) 248 { 249 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); 250 struct virtio_mmio_vq_info *info = vq->priv; 251 unsigned long flags, size; 252 unsigned int index = vq->index; 253 254 spin_lock_irqsave(&vm_dev->lock, flags); 255 list_del(&info->node); 256 spin_unlock_irqrestore(&vm_dev->lock, flags); 257 258 vring_del_virtqueue(vq); 259 260 /* Select and deactivate the queue */ 261 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 262 if (vm_dev->version == 1) { 263 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 264 } else { 265 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); 266 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); 267 } 268 269 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); 270 free_pages_exact(info->queue, size); 271 kfree(info); 272 } 273 274 static void vm_del_vqs(struct virtio_device *vdev) 275 { 276 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 277 struct virtqueue *vq, *n; 278 279 list_for_each_entry_safe(vq, n, &vdev->vqs, list) 280 vm_del_vq(vq); 281 282 free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); 283 } 284 285 286 287 static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, 288 void (*callback)(struct virtqueue *vq), 289 const char *name) 290 { 291 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 292 struct virtio_mmio_vq_info *info; 293 struct virtqueue *vq; 294 unsigned long flags, size; 295 int err; 296 297 if (!name) 298 return NULL; 299 300 /* Select the queue we're interested in */ 301 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 302 303 /* Queue shouldn't already be set up. */ 304 if (readl(vm_dev->base + (vm_dev->version == 1 ? 305 VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) { 306 err = -ENOENT; 307 goto error_available; 308 } 309 310 /* Allocate and fill out our active queue description */ 311 info = kmalloc(sizeof(*info), GFP_KERNEL); 312 if (!info) { 313 err = -ENOMEM; 314 goto error_kmalloc; 315 } 316 317 /* Allocate pages for the queue - start with a queue as big as 318 * possible (limited by maximum size allowed by device), drop down 319 * to a minimal size, just big enough to fit descriptor table 320 * and two rings (which makes it "alignment_size * 2") 321 */ 322 info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); 323 324 /* If the device reports a 0 entry queue, we won't be able to 325 * use it to perform I/O, and vring_new_virtqueue() can't create 326 * empty queues anyway, so don't bother to set up the device. 327 */ 328 if (info->num == 0) { 329 err = -ENOENT; 330 goto error_alloc_pages; 331 } 332 333 while (1) { 334 size = PAGE_ALIGN(vring_size(info->num, 335 VIRTIO_MMIO_VRING_ALIGN)); 336 /* Did the last iter shrink the queue below minimum size? */ 337 if (size < VIRTIO_MMIO_VRING_ALIGN * 2) { 338 err = -ENOMEM; 339 goto error_alloc_pages; 340 } 341 342 info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); 343 if (info->queue) 344 break; 345 346 info->num /= 2; 347 } 348 349 /* Create the vring */ 350 vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, 351 true, info->queue, vm_notify, callback, name); 352 if (!vq) { 353 err = -ENOMEM; 354 goto error_new_virtqueue; 355 } 356 357 /* Activate the queue */ 358 writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); 359 if (vm_dev->version == 1) { 360 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); 361 writel(virt_to_phys(info->queue) >> PAGE_SHIFT, 362 vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 363 } else { 364 u64 addr; 365 366 addr = virt_to_phys(info->queue); 367 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW); 368 writel((u32)(addr >> 32), 369 vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH); 370 371 addr = virt_to_phys(virtqueue_get_avail(vq)); 372 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW); 373 writel((u32)(addr >> 32), 374 vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH); 375 376 addr = virt_to_phys(virtqueue_get_used(vq)); 377 writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW); 378 writel((u32)(addr >> 32), 379 vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH); 380 381 writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); 382 } 383 384 vq->priv = info; 385 info->vq = vq; 386 387 spin_lock_irqsave(&vm_dev->lock, flags); 388 list_add(&info->node, &vm_dev->virtqueues); 389 spin_unlock_irqrestore(&vm_dev->lock, flags); 390 391 return vq; 392 393 error_new_virtqueue: 394 if (vm_dev->version == 1) { 395 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 396 } else { 397 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); 398 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); 399 } 400 free_pages_exact(info->queue, size); 401 error_alloc_pages: 402 kfree(info); 403 error_kmalloc: 404 error_available: 405 return ERR_PTR(err); 406 } 407 408 static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, 409 struct virtqueue *vqs[], 410 vq_callback_t *callbacks[], 411 const char *names[]) 412 { 413 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 414 unsigned int irq = platform_get_irq(vm_dev->pdev, 0); 415 int i, err; 416 417 err = request_irq(irq, vm_interrupt, IRQF_SHARED, 418 dev_name(&vdev->dev), vm_dev); 419 if (err) 420 return err; 421 422 for (i = 0; i < nvqs; ++i) { 423 vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); 424 if (IS_ERR(vqs[i])) { 425 vm_del_vqs(vdev); 426 return PTR_ERR(vqs[i]); 427 } 428 } 429 430 return 0; 431 } 432 433 static const char *vm_bus_name(struct virtio_device *vdev) 434 { 435 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 436 437 return vm_dev->pdev->name; 438 } 439 440 static const struct virtio_config_ops virtio_mmio_config_ops = { 441 .get = vm_get, 442 .set = vm_set, 443 .get_status = vm_get_status, 444 .set_status = vm_set_status, 445 .reset = vm_reset, 446 .find_vqs = vm_find_vqs, 447 .del_vqs = vm_del_vqs, 448 .get_features = vm_get_features, 449 .finalize_features = vm_finalize_features, 450 .bus_name = vm_bus_name, 451 }; 452 453 454 455 /* Platform device */ 456 457 static int virtio_mmio_probe(struct platform_device *pdev) 458 { 459 struct virtio_mmio_device *vm_dev; 460 struct resource *mem; 461 unsigned long magic; 462 463 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 464 if (!mem) 465 return -EINVAL; 466 467 if (!devm_request_mem_region(&pdev->dev, mem->start, 468 resource_size(mem), pdev->name)) 469 return -EBUSY; 470 471 vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); 472 if (!vm_dev) 473 return -ENOMEM; 474 475 vm_dev->vdev.dev.parent = &pdev->dev; 476 vm_dev->vdev.config = &virtio_mmio_config_ops; 477 vm_dev->pdev = pdev; 478 INIT_LIST_HEAD(&vm_dev->virtqueues); 479 spin_lock_init(&vm_dev->lock); 480 481 vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); 482 if (vm_dev->base == NULL) 483 return -EFAULT; 484 485 /* Check magic value */ 486 magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); 487 if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { 488 dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); 489 return -ENODEV; 490 } 491 492 /* Check device version */ 493 vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); 494 if (vm_dev->version < 1 || vm_dev->version > 2) { 495 dev_err(&pdev->dev, "Version %ld not supported!\n", 496 vm_dev->version); 497 return -ENXIO; 498 } 499 500 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); 501 if (vm_dev->vdev.id.device == 0) { 502 /* 503 * virtio-mmio device with an ID 0 is a (dummy) placeholder 504 * with no function. End probing now with no error reported. 505 */ 506 return -ENODEV; 507 } 508 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); 509 510 /* Reject legacy-only IDs for version 2 devices */ 511 if (vm_dev->version == 2 && 512 virtio_device_is_legacy_only(vm_dev->vdev.id)) { 513 dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n", 514 vm_dev->vdev.id.device); 515 return -ENODEV; 516 } 517 518 if (vm_dev->version == 1) 519 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); 520 521 platform_set_drvdata(pdev, vm_dev); 522 523 return register_virtio_device(&vm_dev->vdev); 524 } 525 526 static int virtio_mmio_remove(struct platform_device *pdev) 527 { 528 struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); 529 530 unregister_virtio_device(&vm_dev->vdev); 531 532 return 0; 533 } 534 535 536 537 /* Devices list parameter */ 538 539 #if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES) 540 541 static struct device vm_cmdline_parent = { 542 .init_name = "virtio-mmio-cmdline", 543 }; 544 545 static int vm_cmdline_parent_registered; 546 static int vm_cmdline_id; 547 548 static int vm_cmdline_set(const char *device, 549 const struct kernel_param *kp) 550 { 551 int err; 552 struct resource resources[2] = {}; 553 char *str; 554 long long int base, size; 555 unsigned int irq; 556 int processed, consumed = 0; 557 struct platform_device *pdev; 558 559 /* Consume "size" part of the command line parameter */ 560 size = memparse(device, &str); 561 562 /* Get "@<base>:<irq>[:<id>]" chunks */ 563 processed = sscanf(str, "@%lli:%u%n:%d%n", 564 &base, &irq, &consumed, 565 &vm_cmdline_id, &consumed); 566 567 /* 568 * sscanf() must processes at least 2 chunks; also there 569 * must be no extra characters after the last chunk, so 570 * str[consumed] must be '\0' 571 */ 572 if (processed < 2 || str[consumed]) 573 return -EINVAL; 574 575 resources[0].flags = IORESOURCE_MEM; 576 resources[0].start = base; 577 resources[0].end = base + size - 1; 578 579 resources[1].flags = IORESOURCE_IRQ; 580 resources[1].start = resources[1].end = irq; 581 582 if (!vm_cmdline_parent_registered) { 583 err = device_register(&vm_cmdline_parent); 584 if (err) { 585 pr_err("Failed to register parent device!\n"); 586 return err; 587 } 588 vm_cmdline_parent_registered = 1; 589 } 590 591 pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n", 592 vm_cmdline_id, 593 (unsigned long long)resources[0].start, 594 (unsigned long long)resources[0].end, 595 (int)resources[1].start); 596 597 pdev = platform_device_register_resndata(&vm_cmdline_parent, 598 "virtio-mmio", vm_cmdline_id++, 599 resources, ARRAY_SIZE(resources), NULL, 0); 600 if (IS_ERR(pdev)) 601 return PTR_ERR(pdev); 602 603 return 0; 604 } 605 606 static int vm_cmdline_get_device(struct device *dev, void *data) 607 { 608 char *buffer = data; 609 unsigned int len = strlen(buffer); 610 struct platform_device *pdev = to_platform_device(dev); 611 612 snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n", 613 pdev->resource[0].end - pdev->resource[0].start + 1ULL, 614 (unsigned long long)pdev->resource[0].start, 615 (unsigned long long)pdev->resource[1].start, 616 pdev->id); 617 return 0; 618 } 619 620 static int vm_cmdline_get(char *buffer, const struct kernel_param *kp) 621 { 622 buffer[0] = '\0'; 623 device_for_each_child(&vm_cmdline_parent, buffer, 624 vm_cmdline_get_device); 625 return strlen(buffer) + 1; 626 } 627 628 static struct kernel_param_ops vm_cmdline_param_ops = { 629 .set = vm_cmdline_set, 630 .get = vm_cmdline_get, 631 }; 632 633 device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR); 634 635 static int vm_unregister_cmdline_device(struct device *dev, 636 void *data) 637 { 638 platform_device_unregister(to_platform_device(dev)); 639 640 return 0; 641 } 642 643 static void vm_unregister_cmdline_devices(void) 644 { 645 if (vm_cmdline_parent_registered) { 646 device_for_each_child(&vm_cmdline_parent, NULL, 647 vm_unregister_cmdline_device); 648 device_unregister(&vm_cmdline_parent); 649 vm_cmdline_parent_registered = 0; 650 } 651 } 652 653 #else 654 655 static void vm_unregister_cmdline_devices(void) 656 { 657 } 658 659 #endif 660 661 /* Platform driver */ 662 663 static struct of_device_id virtio_mmio_match[] = { 664 { .compatible = "virtio,mmio", }, 665 {}, 666 }; 667 MODULE_DEVICE_TABLE(of, virtio_mmio_match); 668 669 static struct platform_driver virtio_mmio_driver = { 670 .probe = virtio_mmio_probe, 671 .remove = virtio_mmio_remove, 672 .driver = { 673 .name = "virtio-mmio", 674 .of_match_table = virtio_mmio_match, 675 }, 676 }; 677 678 static int __init virtio_mmio_init(void) 679 { 680 return platform_driver_register(&virtio_mmio_driver); 681 } 682 683 static void __exit virtio_mmio_exit(void) 684 { 685 platform_driver_unregister(&virtio_mmio_driver); 686 vm_unregister_cmdline_devices(); 687 } 688 689 module_init(virtio_mmio_init); 690 module_exit(virtio_mmio_exit); 691 692 MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>"); 693 MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices"); 694 MODULE_LICENSE("GPL"); 695