// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/vfio.h>
#include <linux/iommufd.h>
#include <linux/anon_inodes.h>
#include "vfio.h"

static struct vfio {
        struct class *class;
        struct list_head group_list;
        struct mutex group_lock; /* locks group_list */
        struct ida group_ida;
        dev_t group_devt;
} vfio;

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
                                                     char *buf)
{
        struct vfio_device *it, *device = ERR_PTR(-ENODEV);

        mutex_lock(&group->device_lock);
        list_for_each_entry(it, &group->device_list, group_next) {
                int ret;

                if (it->ops->match) {
                        ret = it->ops->match(it, buf);
                        if (ret < 0) {
                                device = ERR_PTR(ret);
                                break;
                        }
                } else {
                        ret = !strcmp(dev_name(it->dev), buf);
                }

                if (ret && vfio_device_try_get_registration(it)) {
                        device = it;
                        break;
                }
        }
        mutex_unlock(&group->device_lock);

        return device;
}

/*
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static bool vfio_group_has_iommu(struct vfio_group *group)
{
        lockdep_assert_held(&group->group_lock);
        /*
         * There can only be users if there is a container, and if there is a
         * container there must be users.
         */
        WARN_ON(!group->container != !group->container_users);

        return group->container || group->iommufd;
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset. Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_ioctl_unset_container(struct vfio_group *group)
{
        int ret = 0;

        mutex_lock(&group->group_lock);
        if (!vfio_group_has_iommu(group)) {
                ret = -EINVAL;
                goto out_unlock;
        }
        if (group->container) {
                if (group->container_users != 1) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
                vfio_group_detach_container(group);
        }
        if (group->iommufd) {
                iommufd_ctx_put(group->iommufd);
                group->iommufd = NULL;
        }

out_unlock:
        mutex_unlock(&group->group_lock);
        return ret;
}

static int vfio_group_ioctl_set_container(struct vfio_group *group,
                                          int __user *arg)
{
        struct vfio_container *container;
        struct iommufd_ctx *iommufd;
        struct fd f;
        int ret;
        int fd;

        if (get_user(fd, arg))
                return -EFAULT;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        mutex_lock(&group->group_lock);
        if (vfio_group_has_iommu(group)) {
                ret = -EINVAL;
                goto out_unlock;
        }
        if (!group->iommu_group) {
                ret = -ENODEV;
                goto out_unlock;
        }

        container = vfio_container_from_file(f.file);
        if (container) {
                ret = vfio_container_attach_group(container, group);
                goto out_unlock;
        }

        iommufd = iommufd_ctx_from_file(f.file);
        if (!IS_ERR(iommufd)) {
                if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
                    group->type == VFIO_NO_IOMMU)
                        ret = iommufd_vfio_compat_set_no_iommu(iommufd);
                else
                        ret = iommufd_vfio_compat_ioas_create(iommufd);

                if (ret) {
                        /* group->iommufd is still NULL here; drop the ref we just got */
                        iommufd_ctx_put(iommufd);
                        goto out_unlock;
                }

                group->iommufd = iommufd;
                goto out_unlock;
        }

        /* The FD passed is not recognized. */
        ret = -EBADFD;

out_unlock:
        mutex_unlock(&group->group_lock);
        fdput(f);
        return ret;
}

static int vfio_device_group_open(struct vfio_device *device)
{
        int ret;

        mutex_lock(&device->group->group_lock);
        if (!vfio_group_has_iommu(device->group)) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Here we pass the KVM pointer with the group under the lock. If the
         * device driver will use it, it must obtain a reference and release it
         * during close_device.
         */
        ret = vfio_device_open(device, device->group->iommufd,
                               device->group->kvm);

out_unlock:
        mutex_unlock(&device->group->group_lock);
        return ret;
}

void vfio_device_group_close(struct vfio_device *device)
{
        mutex_lock(&device->group->group_lock);
        vfio_device_close(device, device->group->iommufd);
        mutex_unlock(&device->group->group_lock);
}

static struct file *vfio_device_open_file(struct vfio_device *device)
{
        struct file *filep;
        int ret;

        ret = vfio_device_group_open(device);
        if (ret)
                goto err_out;

        /*
         * We can't use anon_inode_getfd() because we need to modify
         * the f_mode flags directly to allow more than just ioctls
         */
        filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
                                   device, O_RDWR);
        if (IS_ERR(filep)) {
                ret = PTR_ERR(filep);
                goto err_close_device;
        }

        /*
         * TODO: add an anon_inode interface to do this.
         * Appears to be missing by lack of need rather than
         * explicitly prevented. Now there's need.
         */
        filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);

        if (device->group->type == VFIO_NO_IOMMU)
                dev_warn(device->dev, "vfio-noiommu device opened by user "
                         "(%s:%d)\n", current->comm, task_pid_nr(current));
        /*
         * On success the ref of device is moved to the file and
         * put in vfio_device_fops_release()
         */
        return filep;

err_close_device:
        vfio_device_group_close(device);
err_out:
        return ERR_PTR(ret);
}

static int vfio_group_ioctl_get_device_fd(struct vfio_group *group,
                                          char __user *arg)
{
        struct vfio_device *device;
        struct file *filep;
        char *buf;
        int fdno;
        int ret;

        buf = strndup_user(arg, PAGE_SIZE);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        device = vfio_device_get_from_name(group, buf);
        kfree(buf);
        if (IS_ERR(device))
                return PTR_ERR(device);

        fdno = get_unused_fd_flags(O_CLOEXEC);
        if (fdno < 0) {
                ret = fdno;
                goto err_put_device;
        }

        filep = vfio_device_open_file(device);
        if (IS_ERR(filep)) {
                ret = PTR_ERR(filep);
                goto err_put_fdno;
        }

        fd_install(fdno, filep);
        return fdno;

err_put_fdno:
        put_unused_fd(fdno);
err_put_device:
        vfio_device_put_registration(device);
        return ret;
}

static int vfio_group_ioctl_get_status(struct vfio_group *group,
                                       struct vfio_group_status __user *arg)
{
        unsigned long minsz = offsetofend(struct vfio_group_status, flags);
        struct vfio_group_status status;

        if (copy_from_user(&status, arg, minsz))
                return -EFAULT;

        if (status.argsz < minsz)
                return -EINVAL;

        status.flags = 0;

        mutex_lock(&group->group_lock);
        if (!group->iommu_group) {
                mutex_unlock(&group->group_lock);
                return -ENODEV;
        }

        /*
         * With the container FD the iommu_group_claim_dma_owner() is done
         * during SET_CONTAINER but for IOMMUFD this is done during
         * VFIO_GROUP_GET_DEVICE_FD. Meaning that with iommufd
         * VFIO_GROUP_FLAGS_VIABLE could be set but GET_DEVICE_FD will fail due
         * to viability.
         */
        if (vfio_group_has_iommu(group))
                status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
                                VFIO_GROUP_FLAGS_VIABLE;
        else if (!iommu_group_dma_owner_claimed(group->iommu_group))
                status.flags |= VFIO_GROUP_FLAGS_VIABLE;
        mutex_unlock(&group->group_lock);

        if (copy_to_user(arg, &status, minsz))
                return -EFAULT;
        return 0;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
                                      unsigned int cmd, unsigned long arg)
{
        struct vfio_group *group = filep->private_data;
        void __user *uarg = (void __user *)arg;

        switch (cmd) {
        case VFIO_GROUP_GET_DEVICE_FD:
                return vfio_group_ioctl_get_device_fd(group, uarg);
        case VFIO_GROUP_GET_STATUS:
                return vfio_group_ioctl_get_status(group, uarg);
        case VFIO_GROUP_SET_CONTAINER:
                return vfio_group_ioctl_set_container(group, uarg);
        case VFIO_GROUP_UNSET_CONTAINER:
                return vfio_group_ioctl_unset_container(group);
        default:
                return -ENOTTY;
        }
}

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
        struct vfio_group *group =
                container_of(inode->i_cdev, struct vfio_group, cdev);
        int ret;

        mutex_lock(&group->group_lock);

        /*
         * drivers can be zero if this races with vfio_device_remove_group();
         * it will be stable at 0 under the group_lock.
         */
        if (refcount_read(&group->drivers) == 0) {
                ret = -ENODEV;
                goto out_unlock;
        }

        if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
                ret = -EPERM;
                goto out_unlock;
        }

        /*
         * Do we need multiple instances of the group open? Seems not.
         */
        if (group->opened_file) {
                ret = -EBUSY;
                goto out_unlock;
        }
        group->opened_file = filep;
        filep->private_data = group;
        ret = 0;
out_unlock:
        mutex_unlock(&group->group_lock);
        return ret;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
        struct vfio_group *group = filep->private_data;

        filep->private_data = NULL;

        mutex_lock(&group->group_lock);
        /*
         * Device FDs hold a group file reference, therefore the group release
         * is only called when there are no open devices.
         */
        WARN_ON(group->notifier.head);
        if (group->container)
                vfio_group_detach_container(group);
        if (group->iommufd) {
                iommufd_ctx_put(group->iommufd);
                group->iommufd = NULL;
        }
        group->opened_file = NULL;
        mutex_unlock(&group->group_lock);
        return 0;
}

static const struct file_operations vfio_group_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = vfio_group_fops_unl_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .open = vfio_group_fops_open,
        .release = vfio_group_fops_release,
};

/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *
vfio_group_find_from_iommu(struct iommu_group *iommu_group)
{
        struct vfio_group *group;

        lockdep_assert_held(&vfio.group_lock);

        /*
         * group->iommu_group from the vfio.group_list cannot be NULL
         * under the vfio.group_lock.
         */
        list_for_each_entry(group, &vfio.group_list, vfio_next) {
                if (group->iommu_group == iommu_group)
                        return group;
        }
        return NULL;
}

static void vfio_group_release(struct device *dev)
{
        struct vfio_group *group = container_of(dev, struct vfio_group, dev);

        mutex_destroy(&group->device_lock);
        mutex_destroy(&group->group_lock);
        WARN_ON(group->iommu_group);
        ida_free(&vfio.group_ida, MINOR(group->dev.devt));
        kfree(group);
}

static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
                                           enum vfio_group_type type)
{
        struct vfio_group *group;
        int minor;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL);
        if (minor < 0) {
                kfree(group);
                return ERR_PTR(minor);
        }

        device_initialize(&group->dev);
        group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
        group->dev.class = vfio.class;
        group->dev.release = vfio_group_release;
        cdev_init(&group->cdev, &vfio_group_fops);
        group->cdev.owner = THIS_MODULE;

        refcount_set(&group->drivers, 1);
        mutex_init(&group->group_lock);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
        group->iommu_group = iommu_group;
        /* put in vfio_group_release() */
        iommu_group_ref_get(iommu_group);
        group->type = type;
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        return group;
}

static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
                                            enum vfio_group_type type)
{
        struct vfio_group *group;
        struct vfio_group *ret;
        int err;

        lockdep_assert_held(&vfio.group_lock);

        group = vfio_group_alloc(iommu_group, type);
        if (IS_ERR(group))
                return group;

        err = dev_set_name(&group->dev, "%s%d",
                           group->type == VFIO_NO_IOMMU ?
"noiommu-" : "", 482 iommu_group_id(iommu_group)); 483 if (err) { 484 ret = ERR_PTR(err); 485 goto err_put; 486 } 487 488 err = cdev_device_add(&group->cdev, &group->dev); 489 if (err) { 490 ret = ERR_PTR(err); 491 goto err_put; 492 } 493 494 list_add(&group->vfio_next, &vfio.group_list); 495 496 return group; 497 498 err_put: 499 put_device(&group->dev); 500 return ret; 501 } 502 503 static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev, 504 enum vfio_group_type type) 505 { 506 struct iommu_group *iommu_group; 507 struct vfio_group *group; 508 int ret; 509 510 iommu_group = iommu_group_alloc(); 511 if (IS_ERR(iommu_group)) 512 return ERR_CAST(iommu_group); 513 514 ret = iommu_group_set_name(iommu_group, "vfio-noiommu"); 515 if (ret) 516 goto out_put_group; 517 ret = iommu_group_add_device(iommu_group, dev); 518 if (ret) 519 goto out_put_group; 520 521 mutex_lock(&vfio.group_lock); 522 group = vfio_create_group(iommu_group, type); 523 mutex_unlock(&vfio.group_lock); 524 if (IS_ERR(group)) { 525 ret = PTR_ERR(group); 526 goto out_remove_device; 527 } 528 iommu_group_put(iommu_group); 529 return group; 530 531 out_remove_device: 532 iommu_group_remove_device(dev); 533 out_put_group: 534 iommu_group_put(iommu_group); 535 return ERR_PTR(ret); 536 } 537 538 static bool vfio_group_has_device(struct vfio_group *group, struct device *dev) 539 { 540 struct vfio_device *device; 541 542 mutex_lock(&group->device_lock); 543 list_for_each_entry(device, &group->device_list, group_next) { 544 if (device->dev == dev) { 545 mutex_unlock(&group->device_lock); 546 return true; 547 } 548 } 549 mutex_unlock(&group->device_lock); 550 return false; 551 } 552 553 static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) 554 { 555 struct iommu_group *iommu_group; 556 struct vfio_group *group; 557 558 iommu_group = iommu_group_get(dev); 559 if (!iommu_group && vfio_noiommu) { 560 /* 561 * With noiommu enabled, create an IOMMU group for devices that 562 * don't already have one, implying no IOMMU hardware/driver 563 * exists. Taint the kernel because we're about to give a DMA 564 * capable device to a user without IOMMU protection. 565 */ 566 group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU); 567 if (!IS_ERR(group)) { 568 add_taint(TAINT_USER, LOCKDEP_STILL_OK); 569 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n"); 570 } 571 return group; 572 } 573 574 if (!iommu_group) 575 return ERR_PTR(-EINVAL); 576 577 /* 578 * VFIO always sets IOMMU_CACHE because we offer no way for userspace to 579 * restore cache coherency. It has to be checked here because it is only 580 * valid for cases where we are using iommu groups. 
         */
        if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
                iommu_group_put(iommu_group);
                return ERR_PTR(-EINVAL);
        }

        mutex_lock(&vfio.group_lock);
        group = vfio_group_find_from_iommu(iommu_group);
        if (group) {
                if (WARN_ON(vfio_group_has_device(group, dev)))
                        group = ERR_PTR(-EINVAL);
                else
                        refcount_inc(&group->drivers);
        } else {
                group = vfio_create_group(iommu_group, VFIO_IOMMU);
        }
        mutex_unlock(&vfio.group_lock);

        /* The vfio_group holds a reference to the iommu_group */
        iommu_group_put(iommu_group);
        return group;
}

int vfio_device_set_group(struct vfio_device *device,
                          enum vfio_group_type type)
{
        struct vfio_group *group;

        if (type == VFIO_IOMMU)
                group = vfio_group_find_or_alloc(device->dev);
        else
                group = vfio_noiommu_group_alloc(device->dev, type);

        if (IS_ERR(group))
                return PTR_ERR(group);

        /* Our reference on group is moved to the device */
        device->group = group;
        return 0;
}

void vfio_device_remove_group(struct vfio_device *device)
{
        struct vfio_group *group = device->group;
        struct iommu_group *iommu_group;

        if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
                iommu_group_remove_device(device->dev);

        /* Pairs with vfio_create_group() / vfio_group_find_or_alloc() */
        if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock))
                return;
        list_del(&group->vfio_next);

        /*
         * We could concurrently probe another driver in the group that might
         * race vfio_device_remove_group() with vfio_group_find_or_alloc(), so
         * we have to ensure that the sysfs is all cleaned up under lock
         * otherwise the cdev_device_add() will fail due to the name already
         * existing.
         */
        cdev_device_del(&group->cdev, &group->dev);

        mutex_lock(&group->group_lock);
        /*
         * These data structures all have paired operations that can only be
         * undone when the caller holds a live reference on the device. Since
         * all pairs must be undone these WARN_ON's indicate some caller did not
         * properly hold the group reference.
         */
        WARN_ON(!list_empty(&group->device_list));
        WARN_ON(group->notifier.head);

        /*
         * Revoke all users of group->iommu_group. At this point we know there
         * are no devices active because we are unplugging the last one. Setting
         * iommu_group to NULL blocks all new users.
         */
        if (group->container)
                vfio_group_detach_container(group);
        iommu_group = group->iommu_group;
        group->iommu_group = NULL;
        mutex_unlock(&group->group_lock);
        mutex_unlock(&vfio.group_lock);

        iommu_group_put(iommu_group);
        put_device(&group->dev);
}

void vfio_device_group_register(struct vfio_device *device)
{
        mutex_lock(&device->group->device_lock);
        list_add(&device->group_next, &device->group->device_list);
        mutex_unlock(&device->group->device_lock);
}

void vfio_device_group_unregister(struct vfio_device *device)
{
        mutex_lock(&device->group->device_lock);
        list_del(&device->group_next);
        mutex_unlock(&device->group->device_lock);
}

int vfio_device_group_use_iommu(struct vfio_device *device)
{
        struct vfio_group *group = device->group;
        int ret = 0;

        lockdep_assert_held(&group->group_lock);

        if (WARN_ON(!group->container))
                return -EINVAL;

        ret = vfio_group_use_container(group);
        if (ret)
                return ret;
        vfio_device_container_register(device);
        return 0;
}

void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
        struct vfio_group *group = device->group;

        lockdep_assert_held(&group->group_lock);

        if (WARN_ON(!group->container))
                return;

        vfio_device_container_unregister(device);
        vfio_group_unuse_container(group);
}

bool vfio_device_has_container(struct vfio_device *device)
{
        return device->group->container;
}

/**
 * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
 * @file: VFIO group file
 *
 * The returned iommu_group is valid as long as a ref is held on the file. This
 * returns a reference on the group. This function is deprecated, only the
 * SPAPR path in kvm should call it.
 */
struct iommu_group *vfio_file_iommu_group(struct file *file)
{
        struct vfio_group *group = file->private_data;
        struct iommu_group *iommu_group = NULL;

        if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
                return NULL;

        if (!vfio_file_is_group(file))
                return NULL;

        mutex_lock(&group->group_lock);
        if (group->iommu_group) {
                iommu_group = group->iommu_group;
                iommu_group_ref_get(iommu_group);
        }
        mutex_unlock(&group->group_lock);
        return iommu_group;
}
EXPORT_SYMBOL_GPL(vfio_file_iommu_group);

/**
 * vfio_file_is_group - True if the file is usable with VFIO APIs
 * @file: VFIO group file
 */
bool vfio_file_is_group(struct file *file)
{
        return file->f_op == &vfio_group_fops;
}
EXPORT_SYMBOL_GPL(vfio_file_is_group);

/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
        struct vfio_group *group = file->private_data;
        struct vfio_device *device;
        bool ret = true;

        if (!vfio_file_is_group(file))
                return true;

        /*
         * If the device does not have IOMMU_CAP_ENFORCE_CACHE_COHERENCY then
         * any domain later attached to it will also not support it.
         * If the cap is set then the iommu_domain eventually attached to the
         * device/group must use a domain with enforce_cache_coherency().
         */
        mutex_lock(&group->device_lock);
        list_for_each_entry(device, &group->device_list, group_next) {
                if (!device_iommu_capable(device->dev,
                                          IOMMU_CAP_ENFORCE_CACHE_COHERENCY)) {
                        ret = false;
                        break;
                }
        }
        mutex_unlock(&group->device_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);

/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the group.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
        struct vfio_group *group = file->private_data;

        if (!vfio_file_is_group(file))
                return;

        mutex_lock(&group->group_lock);
        group->kvm = kvm;
        mutex_unlock(&group->group_lock);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);

/**
 * vfio_file_has_dev - True if the VFIO file is a handle for the device
 * @file: VFIO file to check
 * @device: Device that must be part of the file
 *
 * Returns true if the given file has permission to manipulate the given device.
 */
bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
{
        struct vfio_group *group = file->private_data;

        if (!vfio_file_is_group(file))
                return false;

        return group == device->group;
}
EXPORT_SYMBOL_GPL(vfio_file_has_dev);

static char *vfio_devnode(const struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

int __init vfio_group_init(void)
{
        int ret;

        ida_init(&vfio.group_ida);
        mutex_init(&vfio.group_lock);
        INIT_LIST_HEAD(&vfio.group_list);

        ret = vfio_container_init();
        if (ret)
                return ret;

        /* /dev/vfio/$GROUP */
        vfio.class = class_create(THIS_MODULE, "vfio");
        if (IS_ERR(vfio.class)) {
                ret = PTR_ERR(vfio.class);
                goto err_group_class;
        }

        vfio.class->devnode = vfio_devnode;

        ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
        if (ret)
                goto err_alloc_chrdev;
        return 0;

err_alloc_chrdev:
        class_destroy(vfio.class);
        vfio.class = NULL;
err_group_class:
        vfio_container_cleanup();
        return ret;
}

void vfio_group_cleanup(void)
{
        WARN_ON(!list_empty(&vfio.group_list));
        ida_destroy(&vfio.group_ida);
        unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
        class_destroy(vfio.class);
        vfio.class = NULL;
        vfio_container_cleanup();
}