// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

static DEFINE_MUTEX(reflck_lock);

static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static void vfio_fsl_mc_reflck_release(struct kref *kref)
{
	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
						struct vfio_fsl_mc_reflck,
						kref);

	mutex_destroy(&reflck->lock);
	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
}

static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
{
	struct vfio_fsl_mc_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev =
			container_of(device, struct vfio_fsl_mc_device, vdev);
		if (!cont_vdev || !cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}

static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);
	if (!vdev->refcnt) {
		ret = vfio_fsl_mc_regions_init(vdev);
		if (ret)
			goto err_reg_init;
	}
	vdev->refcnt++;

	mutex_unlock(&vdev->reflck->lock);

	return 0;

err_reg_init:
	mutex_unlock(&vdev->reflck->lock);
	module_put(THIS_MODULE);
	return ret;
}

static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	int ret;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		struct fsl_mc_device *mc_dev = vdev->mc_dev;
		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);

		vfio_fsl_mc_regions_cleanup(vdev);

		/* reset the device before cleaning up the interrupts */
		ret = dprc_reset_container(mc_cont->mc_io, 0,
					   mc_cont->mc_handle,
					   mc_cont->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);

		if (ret) {
			dev_warn(&mc_cont->dev, "VFIO_FSL_MC: reset device has failed (%d)\n",
				 ret);
			WARN_ON(1);
		}

		vfio_fsl_mc_irqs_cleanup(vdev);

		fsl_mc_cleanup_irq_pool(mc_cont);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
			      unsigned int cmd, unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		int ret;
		struct fsl_mc_device *mc_dev = vdev->mc_dev;

		/* reset is supported only for the DPRC */
		if (!is_fsl_mc_bus_dprc(mc_dev))
			return -ENOTTY;

		ret = dprc_reset_container(mc_dev->mc_io, 0,
					   mc_dev->mc_handle,
					   mc_dev->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}
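
/*
 * Example (illustrative sketch, not part of this driver): before touching
 * any region, a user-space client typically walks the ioctls handled
 * above. "device_fd" is assumed to come from VFIO_GROUP_GET_DEVICE_FD.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
 *	struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
 *	unsigned int i;
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &dev_info);
 *	for (i = 0; i < dev_info.num_regions; i++) {
 *		reg_info.index = i;
 *		ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
 *		// reg_info.offset equals VFIO_FSL_MC_INDEX_TO_OFFSET(i)
 *	}
 */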

static ssize_t vfio_fsl_mc_read(struct vfio_device *core_vdev, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS	5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS	500

static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write the command parameters into the portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last */
	writeq(cmd_data[0], ioaddr);

	/*
	 * Wait for the response before returning to user-space.
	 * This can be optimized in the future to even prepare the response
	 * before returning to user-space and avoid the read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

static ssize_t vfio_fsl_mc_write(struct vfio_device *core_vdev,
				 const char __user *buf, size_t count,
				 loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}
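
/*
 * Example (illustrative sketch, not part of this driver): the read/write
 * handlers above accept only a complete 64-byte MC command at offset 0 of
 * a command-portal region. Assuming "device_fd" and "portal_offset" were
 * obtained via VFIO_GROUP_GET_DEVICE_FD and VFIO_DEVICE_GET_REGION_INFO,
 * a client would issue a command and collect the response like this
 * (build_mc_command() is a hypothetical helper):
 *
 *	#include <unistd.h>
 *	#include <stdint.h>
 *
 *	uint64_t cmd[8];	// cmd[0] is the header, cmd[1..7] the parameters
 *
 *	build_mc_command(cmd);
 *	pwrite(device_fd, cmd, 64, portal_offset);	// vfio_fsl_mc_write()
 *	pread(device_fd, cmd, 64, portal_offset);	// vfio_fsl_mc_read()
 */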

static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
			    struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}
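
/*
 * Example (illustrative sketch, not part of this driver): a region whose
 * flags include VFIO_REGION_INFO_FLAG_MMAP can be mapped directly; the
 * mmap offset encodes the region index, which vfio_fsl_mc_mmap() decodes
 * above. "reg_info" is assumed to be the result of a successful
 * VFIO_DEVICE_GET_REGION_INFO on "device_fd":
 *
 *	#include <sys/mman.h>
 *
 *	void *base = MAP_FAILED;
 *
 *	if (reg_info.flags & VFIO_REGION_INFO_FLAG_MMAP)
 *		base = mmap(NULL, reg_info.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, device_fd, reg_info.offset);
 */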

static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.open		= vfio_fsl_mc_open,
	.release	= vfio_fsl_mc_release,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
};

static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

	/* Non-dprc devices share mc_io with their parent */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;

	/* open the DPRC and allocate an MC portal */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}
	return 0;

out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	return ret;
}

static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
{
	int ret;

	/* non-dprc devices do not scan for other devices */
	if (!is_fsl_mc_bus_dprc(mc_dev))
		return 0;
	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev,
			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		dprc_remove_devices(mc_dev, NULL, 0);
		return ret;
	}
	return 0;
}

static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (!is_fsl_mc_bus_dprc(mc_dev))
		return;

	dprc_cleanup(mc_dev);
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}

static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct iommu_group *group;
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
		return -EINVAL;
	}

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops);
	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);

	ret = vfio_fsl_mc_reflck_attach(vdev);
	if (ret)
		goto out_kfree;

	ret = vfio_fsl_mc_init_device(vdev);
	if (ret)
		goto out_reflck;

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_device;
	}

	/*
	 * This triggers recursion into vfio_fsl_mc_probe() on another device
	 * and the vfio_fsl_mc_reflck_attach() there must succeed, which
	 * relies on the vfio_register_group_dev() call above. It has no
	 * impact on this vdev, so it is safe to do after the vfio device is
	 * made live.
	 */
	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	dev_set_drvdata(dev, vdev);
	return 0;

out_group_dev:
	vfio_unregister_group_dev(&vdev->vdev);
out_device:
	vfio_fsl_uninit_device(vdev);
out_reflck:
	vfio_fsl_mc_reflck_put(vdev->reflck);
out_kfree:
	kfree(vdev);
out_group_put:
	vfio_iommu_group_put(group, dev);
	return ret;
}

static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct device *dev = &mc_dev->dev;
	struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	mutex_destroy(&vdev->igate);

	dprc_remove_devices(mc_dev, NULL, 0);
	vfio_fsl_uninit_device(vdev);
	vfio_fsl_mc_reflck_put(vdev->reflck);

	kfree(vdev);
	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);

	return 0;
}

static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe		= vfio_fsl_mc_probe,
	.remove		= vfio_fsl_mc_remove,
	.driver	= {
		.name	= "vfio-fsl-mc",
		.owner	= THIS_MODULE,
	},
};

static int __init vfio_fsl_mc_driver_init(void)
{
	return fsl_mc_driver_register(&vfio_fsl_mc_driver);
}

static void __exit vfio_fsl_mc_driver_exit(void)
{
	fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
}

module_init(vfio_fsl_mc_driver_init);
module_exit(vfio_fsl_mc_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
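
/*
 * Usage sketch (illustrative, not part of this driver): a DPRC container
 * is typically handed to this meta-driver from user space via sysfs
 * driver_override; "dprc.2" is an assumed example container name:
 *
 *	echo vfio-fsl-mc > /sys/bus/fsl-mc/devices/dprc.2/driver_override
 *	echo dprc.2 > /sys/bus/fsl-mc/drivers/vfio-fsl-mc/bind
 *
 * vfio_fsl_mc_bus_notifier() then sets driver_override for the child
 * objects discovered in the container so they bind to vfio-fsl-mc too.
 */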