// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42 + 32,
 *				.end = 42 + 32,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *	[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *	<size>     := size (can use standard suffixes like K, M or G)
 *	<baseaddr> := physical base address
 *	<irq>      := interrupt number (as passed to request_irq())
 *	<id>       := (optional) platform device id
 *    eg.:
 *	virtio_mmio.device=0x100@0x100b0000:48 \
 *		virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>



/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE



#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};



/* Configuration interface */

static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}
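/*
 * Illustrative example (not part of the driver): a device offering
 * VIRTIO_RING_F_EVENT_IDX (bit 29) and VIRTIO_F_VERSION_1 (bit 32) would
 * return 0x00000001 while DEVICE_FEATURES_SEL is 1 and 0x20000000 while it
 * is 0, which vm_get_features() above folds into the 64-bit word
 * 0x0000000120000000 before handing it to the virtio core.
 */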
static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
	       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
	       vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

static void vm_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}
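/*
 * Illustrative only: the virtio core drives vm_set_status() through the
 * usual handshake, OR-ing in ACKNOWLEDGE, then DRIVER, then (for modern
 * devices) FEATURES_OK after vm_finalize_features(), and finally DRIVER_OK
 * once the queues are set up; vm_reset() below is the only path that writes
 * a zero status.
 */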
static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}



/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}



static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
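/*
 * Queue setup sequence, sketched for reference (version 2 registers shown;
 * version 1 uses QUEUE_ALIGN/QUEUE_PFN instead of the address registers):
 *
 *	QUEUE_SEL     <- index			select the queue
 *	QUEUE_READY   == 0			must not be live already
 *	QUEUE_NUM_MAX -> num			device's maximum ring size
 *	QUEUE_NUM     <- ring size		size actually allocated
 *	QUEUE_DESC/AVAIL/USED_LOW/HIGH <- ring addresses
 *	QUEUE_READY   <- 1			activate the queue
 */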
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				    true, true, ctx, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
		       vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
		       vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
		       vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
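/*
 * Illustrative only (the callback and queue name below are made up): a
 * virtio driver does not call vm_find_vqs() directly but goes through the
 * generic helper, roughly
 *
 *	vq_callback_t *cbs[] = { my_done_cb };
 *	static const char * const names[] = { "requests" };
 *	struct virtqueue *vqs[1];
 *	err = virtio_find_vqs(vdev, 1, vqs, cbs, names, NULL);
 *
 * which lands here via virtio_mmio_config_ops.find_vqs. All of a device's
 * queues share the single platform IRQ, so vm_interrupt() dispatches every
 * VRING interrupt to each registered virtqueue.
 */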
static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get = vm_get,
	.set = vm_set,
	.generation = vm_generation,
	.get_status = vm_get_status,
	.set_status = vm_set_status,
	.reset = vm_reset,
	.find_vqs = vm_find_vqs,
	.del_vqs = vm_del_vqs,
	.get_features = vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name = vm_bus_name,
};


static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev =
			container_of(vdev, struct virtio_mmio_device, vdev);
	struct platform_device *pdev = vm_dev->pdev;

	devm_kfree(&pdev->dev, vm_dev);
}

/* Platform device */
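/*
 * Probe sequence, summarized for reference: map the MMIO window, verify the
 * "virt" magic, accept version 1 or 2, bail out quietly on device ID 0 (an
 * empty placeholder slot), pick DMA masks (legacy devices need the vring's
 * coherent allocation to fit a 32-bit PFN, modern ones get a full 64-bit
 * mask), and finally register the virtio device.
 */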
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;
	int rc;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressible as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}



/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long int base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed])
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);
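/*
 * Illustrative only: reading the parameter back (it is read-only at runtime,
 * e.g. via /sys/module/virtio_mmio/parameters/device) returns one line per
 * registered device in the same syntax that vm_cmdline_set() accepts, such
 * as "0x200@0x100b0000:48:0" (size, base address, IRQ and platform device
 * id), as produced by vm_cmdline_get_device() above.
 */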
static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe = virtio_mmio_probe,
	.remove = virtio_mmio_remove,
	.driver = {
		.name = "virtio-mmio",
		.of_match_table = virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");