// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>

#include "io_pagetable.h"
#include "iommufd_private.h"

static bool allow_unsafe_interrupts;
module_param(allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(
	allow_unsafe_interrupts,
	"Allow IOMMUFD to bind to devices even if the platform cannot isolate "
	"the MSI interrupt window. Enabling this is a security weakness.");

void iommufd_device_destroy(struct iommufd_object *obj)
{
	struct iommufd_device *idev =
		container_of(obj, struct iommufd_device, obj);

	iommu_device_release_dma_owner(idev->dev);
	iommu_group_put(idev->group);
	if (!iommufd_selftest_is_mock_dev(idev->dev))
		iommufd_ctx_put(idev->ictx);
}

/**
 * iommufd_device_bind - Bind a physical device to an iommu fd
 * @ictx: iommufd file descriptor
 * @dev: Pointer to a physical device struct
 * @id: Output ID number to return to userspace for this device
 *
 * A successful bind establishes ownership over the device and returns a
 * struct iommufd_device pointer, otherwise it returns an error pointer.
 *
 * A driver using this API must set driver_managed_dma and must not touch
 * the device until this routine succeeds and establishes ownership.
 *
 * Binding a PCI device places the entire RID under iommufd control.
 *
 * The caller must undo this with iommufd_device_unbind()
 */
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
					   struct device *dev, u32 *id)
{
	struct iommufd_device *idev;
	struct iommu_group *group;
	int rc;

	/*
	 * iommufd always sets IOMMU_CACHE because we offer no way for userspace
	 * to restore cache coherency.
	 */
	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
		return ERR_PTR(-EINVAL);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	rc = iommu_device_claim_dma_owner(dev, ictx);
	if (rc)
		goto out_group_put;

	idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_release_owner;
	}
	idev->ictx = ictx;
	if (!iommufd_selftest_is_mock_dev(dev))
		iommufd_ctx_get(ictx);
	idev->dev = dev;
	idev->enforce_cache_coherency =
		device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
	/* The calling driver is a user until iommufd_device_unbind() */
	refcount_inc(&idev->obj.users);
	/* group refcount moves into iommufd_device */
	idev->group = group;

	/*
	 * If the caller fails after this success it must call
	 * iommufd_device_unbind() which is safe since we hold this refcount.
	 * This also means the device is a leaf in the graph and no other object
	 * can take a reference on it.
	 */
	iommufd_object_finalize(ictx, &idev->obj);
	*id = idev->obj.id;
	return idev;

out_release_owner:
	iommu_device_release_dma_owner(dev);
out_group_put:
	iommu_group_put(group);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
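
/*
 * Example (editor's sketch, not part of this file): a consumer driver that
 * has set driver_managed_dma might pair bind/unbind roughly as below. The
 * example_*() names and the drvdata bookkeeping are hypothetical.
 *
 *	static int example_bind_device(struct iommufd_ctx *ictx,
 *				       struct device *dev, u32 *out_dev_id)
 *	{
 *		struct iommufd_device *idev;
 *
 *		idev = iommufd_device_bind(ictx, dev, out_dev_id);
 *		if (IS_ERR(idev))
 *			return PTR_ERR(idev);
 *
 *		dev_set_drvdata(dev, idev);
 *		return 0;
 *	}
 *
 *	static void example_unbind_device(struct device *dev)
 *	{
 *		iommufd_device_unbind(dev_get_drvdata(dev));
 *	}
 */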

/**
 * iommufd_device_unbind - Undo iommufd_device_bind()
 * @idev: Device returned by iommufd_device_bind()
 *
 * Release the device from iommufd control. The DMA ownership returns to
 * unowned with DMA controlled by the DMA API. This invalidates the
 * iommufd_device pointer, other APIs that consume it must not be called
 * concurrently.
 */
void iommufd_device_unbind(struct iommufd_device *idev)
{
	bool was_destroyed;

	was_destroyed = iommufd_object_destroy_user(idev->ictx, &idev->obj);
	WARN_ON(!was_destroyed);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);

static int iommufd_device_setup_msi(struct iommufd_device *idev,
				    struct iommufd_hw_pagetable *hwpt,
				    phys_addr_t sw_msi_start)
{
	int rc;

	/*
	 * If the IOMMU driver gives an IOMMU_RESV_SW_MSI region then it is
	 * asking us to call iommu_get_msi_cookie() on its behalf. This is
	 * necessary to set up the MSI window so iommu_dma_prepare_msi() can
	 * install pages into our domain after request_irq(). If it is not done
	 * interrupts will not work on this domain.
	 *
	 * FIXME: This is conceptually broken for iommufd since we want to allow
	 * userspace to change the domains, eg switch from an identity IOAS to a
	 * DMA IOAS. There is currently no way to create a MSI window that
	 * matches what the IRQ layer actually expects in a newly created
	 * domain.
	 */
	if (sw_msi_start != PHYS_ADDR_MAX && !hwpt->msi_cookie) {
		rc = iommu_get_msi_cookie(hwpt->domain, sw_msi_start);
		if (rc)
			return rc;

		/*
		 * iommu_get_msi_cookie() can only be called once per domain,
		 * it returns -EBUSY on later calls.
		 */
		hwpt->msi_cookie = true;
	}

	/*
	 * For historical compat with VFIO the insecure interrupt path is
	 * allowed if the module parameter is set. Secure/Isolated means that a
	 * MemWr operation from the device (eg a simple DMA) cannot trigger an
	 * interrupt outside this iommufd context.
	 */
	if (!iommufd_selftest_is_mock_dev(idev->dev) &&
	    !iommu_group_has_isolated_msi(idev->group)) {
		if (!allow_unsafe_interrupts)
			return -EPERM;

		dev_warn(
			idev->dev,
			"MSI interrupts are not secure, they cannot be isolated by the platform. "
			"Check that platform features like interrupt remapping are enabled. "
			"Use the \"allow_unsafe_interrupts\" module parameter to override\n");
	}
	return 0;
}

static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt,
					   struct iommu_group *group)
{
	struct iommufd_device *cur_dev;

	lockdep_assert_held(&hwpt->devices_lock);

	list_for_each_entry(cur_dev, &hwpt->devices, devices_item)
		if (cur_dev->group == group)
			return true;
	return false;
}

int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev)
{
	phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
	int rc;

	lockdep_assert_held(&hwpt->devices_lock);

	if (WARN_ON(idev->hwpt))
		return -EINVAL;

	/*
	 * Try to upgrade the domain we have, it is an iommu driver bug to
	 * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail
	 * enforce_cache_coherency when there are no devices attached to the
	 * domain.
	 */
	if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) {
		if (hwpt->domain->ops->enforce_cache_coherency)
			hwpt->enforce_cache_coherency =
				hwpt->domain->ops->enforce_cache_coherency(
					hwpt->domain);
		if (!hwpt->enforce_cache_coherency) {
			WARN_ON(list_empty(&hwpt->devices));
			return -EINVAL;
		}
	}

	rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
						   idev->group, &sw_msi_start);
	if (rc)
		return rc;

	rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start);
	if (rc)
		goto err_unresv;

	/*
	 * FIXME: Hack around missing a device-centric iommu api, only attach to
	 * the group once for the first device that is in the group.
	 */
	if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
		rc = iommu_attach_group(hwpt->domain, idev->group);
		if (rc)
			goto err_unresv;
	}
	return 0;
err_unresv:
	iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
	return rc;
}

void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
				 struct iommufd_device *idev)
{
	if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
		iommu_detach_group(hwpt->domain, idev->group);
	iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
}

static int iommufd_device_do_attach(struct iommufd_device *idev,
				    struct iommufd_hw_pagetable *hwpt)
{
	int rc;

	mutex_lock(&hwpt->devices_lock);
	rc = iommufd_hw_pagetable_attach(hwpt, idev);
	if (rc)
		goto out_unlock;

	idev->hwpt = hwpt;
	refcount_inc(&hwpt->obj.users);
	list_add(&idev->devices_item, &hwpt->devices);
out_unlock:
	mutex_unlock(&hwpt->devices_lock);
	return rc;
}

/*
 * When automatically managing the domains we search for a compatible domain in
 * the ioas and if one is found use it, otherwise create a new domain.
 * Automatic domain selection will never pick a manually created domain.
 */
static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
					  struct iommufd_ioas *ioas)
{
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	/*
	 * There is no differentiation when domains are allocated, so any domain
	 * that is willing to attach to the device is interchangeable with any
	 * other.
	 */
	mutex_lock(&ioas->mutex);
	list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) {
		if (!hwpt->auto_domain)
			continue;

		if (!iommufd_lock_obj(&hwpt->obj))
			continue;
		rc = iommufd_device_do_attach(idev, hwpt);
		iommufd_put_object(&hwpt->obj);

		/*
		 * -EINVAL means the domain is incompatible with the device.
		 * Other error codes should propagate to userspace as failure.
		 * Success means the domain is attached.
		 */
		if (rc == -EINVAL)
			continue;
		goto out_unlock;
	}

	hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out_unlock;
	}
	hwpt->auto_domain = true;

	mutex_unlock(&ioas->mutex);
	iommufd_object_finalize(idev->ictx, &hwpt->obj);
	return 0;
out_unlock:
	mutex_unlock(&ioas->mutex);
	return rc;
}

/**
 * iommufd_device_attach - Connect a device to an iommu_domain
 * @idev: device to attach
 * @pt_id: Input an IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
 *         Output the IOMMUFD_OBJ_HW_PAGETABLE ID
 *
 * This connects the device to an iommu_domain, either automatically or manually
 * selected. Once this completes the device can do DMA.
 *
 * The caller should return the resulting pt_id back to userspace.
 * This function is undone by calling iommufd_device_detach().
 */
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
{
	struct iommufd_object *pt_obj;
	int rc;

	pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj))
		return PTR_ERR(pt_obj);

	switch (pt_obj->type) {
	case IOMMUFD_OBJ_HW_PAGETABLE: {
		struct iommufd_hw_pagetable *hwpt =
			container_of(pt_obj, struct iommufd_hw_pagetable, obj);

		rc = iommufd_device_do_attach(idev, hwpt);
		if (rc)
			goto out_put_pt_obj;
		break;
	}
	case IOMMUFD_OBJ_IOAS: {
		struct iommufd_ioas *ioas =
			container_of(pt_obj, struct iommufd_ioas, obj);

		rc = iommufd_device_auto_get_domain(idev, ioas);
		if (rc)
			goto out_put_pt_obj;
		break;
	}
	default:
		rc = -EINVAL;
		goto out_put_pt_obj;
	}

	refcount_inc(&idev->obj.users);
	*pt_id = idev->hwpt->obj.id;
	rc = 0;

out_put_pt_obj:
	iommufd_put_object(pt_obj);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
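
/*
 * Example (editor's sketch, not part of this file): attaching a bound device
 * to an IOAS and passing the resulting hw_pagetable ID back to userspace.
 * Note that pt_id is in/out; the helper name is hypothetical.
 *
 *	static int example_attach_ioas(struct iommufd_device *idev, u32 ioas_id,
 *				       u32 *out_hwpt_id)
 *	{
 *		u32 pt_id = ioas_id;
 *		int rc;
 *
 *		rc = iommufd_device_attach(idev, &pt_id);
 *		if (rc)
 *			return rc;
 *
 *		*out_hwpt_id = pt_id;
 *		return 0;
 *	}
 *
 * The attachment is undone with iommufd_device_detach(idev) before the device
 * is unbound.
 */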

/**
 * iommufd_device_detach - Disconnect a device from an iommu_domain
 * @idev: device to detach
 *
 * Undo iommufd_device_attach(). This disconnects the idev from the previously
 * attached pt_id. The device returns to a blocked DMA translation.
 */
void iommufd_device_detach(struct iommufd_device *idev)
{
	struct iommufd_hw_pagetable *hwpt = idev->hwpt;

	mutex_lock(&hwpt->devices_lock);
	list_del(&idev->devices_item);
	idev->hwpt = NULL;
	iommufd_hw_pagetable_detach(hwpt, idev);
	mutex_unlock(&hwpt->devices_lock);

	if (hwpt->auto_domain)
		iommufd_object_destroy_user(idev->ictx, &hwpt->obj);
	else
		refcount_dec(&hwpt->obj.users);

	refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);

void iommufd_access_destroy_object(struct iommufd_object *obj)
{
	struct iommufd_access *access =
		container_of(obj, struct iommufd_access, obj);

	if (access->ioas) {
		iopt_remove_access(&access->ioas->iopt, access);
		refcount_dec(&access->ioas->obj.users);
		access->ioas = NULL;
	}
	iommufd_ctx_put(access->ictx);
}

/**
 * iommufd_access_create - Create an iommufd_access
 * @ictx: iommufd file descriptor
 * @ops: Driver's ops to associate with the access
 * @data: Opaque data to pass into ops functions
 * @id: Output ID number to return to userspace for this access
 *
 * An iommufd_access allows a driver to read/write to the IOAS without using
 * DMA. The underlying CPU memory can be accessed using the
 * iommufd_access_pin_pages() or iommufd_access_rw() functions.
 *
 * The provided ops are required to use iommufd_access_pin_pages().
 */
struct iommufd_access *
iommufd_access_create(struct iommufd_ctx *ictx,
		      const struct iommufd_access_ops *ops, void *data, u32 *id)
{
	struct iommufd_access *access;

	/*
	 * There is no uAPI for the access object, but to keep things symmetric
	 * use the object infrastructure anyhow.
	 */
	access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
	if (IS_ERR(access))
		return access;

	access->data = data;
	access->ops = ops;

	if (ops->needs_pin_pages)
		access->iova_alignment = PAGE_SIZE;
	else
		access->iova_alignment = 1;

	/* The calling driver is a user until iommufd_access_destroy() */
	refcount_inc(&access->obj.users);
	access->ictx = ictx;
	iommufd_ctx_get(ictx);
	iommufd_object_finalize(ictx, &access->obj);
	*id = access->obj.id;
	return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);

/**
 * iommufd_access_destroy - Destroy an iommufd_access
 * @access: The access to destroy
 *
 * The caller must stop using the access before destroying it.
 */
void iommufd_access_destroy(struct iommufd_access *access)
{
	bool was_destroyed;

	was_destroyed = iommufd_object_destroy_user(access->ictx, &access->obj);
	WARN_ON(!was_destroyed);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);

int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
	struct iommufd_ioas *new_ioas;
	int rc = 0;

	if (access->ioas)
		return -EINVAL;

	new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
	if (IS_ERR(new_ioas))
		return PTR_ERR(new_ioas);

	rc = iopt_add_access(&new_ioas->iopt, access);
	if (rc) {
		iommufd_put_object(&new_ioas->obj);
		return rc;
	}
	iommufd_ref_to_users(&new_ioas->obj);

	access->ioas = new_ioas;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
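
/*
 * Example (editor's sketch, not part of this file): an emulated-device driver
 * might create an access and attach it to an IOAS as below. The example_*
 * names are hypothetical; example_unmap() is sketched after
 * iommufd_access_unpin_pages() further down.
 *
 *	static const struct iommufd_access_ops example_access_ops = {
 *		.needs_pin_pages = 1,
 *		.unmap = example_unmap,
 *	};
 *
 *	static struct iommufd_access *
 *	example_access_setup(struct iommufd_ctx *ictx, void *priv, u32 ioas_id)
 *	{
 *		struct iommufd_access *access;
 *		u32 access_id;
 *		int rc;
 *
 *		access = iommufd_access_create(ictx, &example_access_ops, priv,
 *					       &access_id);
 *		if (IS_ERR(access))
 *			return access;
 *
 *		rc = iommufd_access_attach(access, ioas_id);
 *		if (rc) {
 *			iommufd_access_destroy(access);
 *			return ERR_PTR(rc);
 *		}
 *		return access;
 *	}
 */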

/**
 * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
 * @iopt: iopt to work on
 * @iova: Starting iova in the iopt
 * @length: Number of bytes
 *
 * After this function returns there should be no users attached to the pages
 * linked to this iopt that intersect with iova,length. Anyone that has attached
 * a user through iopt_access_pages() needs to detach it through
 * iommufd_access_unpin_pages() before this function returns.
 *
 * iommufd_access_destroy() will wait for any outstanding unmap callback to
 * complete. Once iommufd_access_destroy() returns no unmap ops are running or
 * will run in the future. Due to this a driver must not create locking that
 * prevents unmap from completing while iommufd_access_destroy() is running.
 */
void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length)
{
	struct iommufd_ioas *ioas =
		container_of(iopt, struct iommufd_ioas, iopt);
	struct iommufd_access *access;
	unsigned long index;

	xa_lock(&ioas->iopt.access_list);
	xa_for_each(&ioas->iopt.access_list, index, access) {
		if (!iommufd_lock_obj(&access->obj))
			continue;
		xa_unlock(&ioas->iopt.access_list);

		access->ops->unmap(access->data, iova, length);

		iommufd_put_object(&access->obj);
		xa_lock(&ioas->iopt.access_list);
	}
	xa_unlock(&ioas->iopt.access_list);
}

/**
 * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 *
 * Return the pinned struct pages back. The caller must stop accessing them
 * before calling this. The iova/length must exactly match the one provided
 * to iommufd_access_pin_pages().
 */
void iommufd_access_unpin_pages(struct iommufd_access *access,
				unsigned long iova, unsigned long length)
{
	struct io_pagetable *iopt = &access->ioas->iopt;
	struct iopt_area_contig_iter iter;
	unsigned long last_iova;
	struct iopt_area *area;

	if (WARN_ON(!length) ||
	    WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
		return;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
		iopt_area_remove_access(
			area, iopt_area_iova_to_index(area, iter.cur_iova),
			iopt_area_iova_to_index(
				area,
				min(last_iova, iopt_area_last_iova(area))));
	WARN_ON(!iopt_area_contig_done(&iter));
	up_read(&iopt->iova_rwsem);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);
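
/*
 * Example (editor's sketch, not part of this file): an unmap callback that
 * honours the contract above by unpinning before it returns. The
 * example_state bookkeeping (a single tracked pin) is hypothetical; a real
 * driver tracks its own pins and must not take locks here that
 * iommufd_access_destroy() could be waiting behind.
 *
 *	struct example_state {
 *		struct iommufd_access *access;
 *		unsigned long pinned_iova;
 *		unsigned long pinned_length;
 *	};
 *
 *	static void example_unmap(void *data, unsigned long iova,
 *				  unsigned long length)
 *	{
 *		struct example_state *st = data;
 *
 *		if (st->pinned_length &&
 *		    st->pinned_iova <= iova + length - 1 &&
 *		    iova <= st->pinned_iova + st->pinned_length - 1) {
 *			iommufd_access_unpin_pages(st->access, st->pinned_iova,
 *						   st->pinned_length);
 *			st->pinned_length = 0;
 *		}
 *	}
 */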

static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
{
	if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
		return false;

	if (!iopt_area_contig_done(iter) &&
	    (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
	     PAGE_SIZE) != (PAGE_SIZE - 1))
		return false;
	return true;
}

static bool check_area_prot(struct iopt_area *area, unsigned int flags)
{
	if (flags & IOMMUFD_ACCESS_RW_WRITE)
		return area->iommu_prot & IOMMU_WRITE;
	return area->iommu_prot & IOMMU_READ;
}

/**
 * iommufd_access_pin_pages() - Return a list of pages under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 * @out_pages: Output page list
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Reads @length bytes starting at iova and returns the struct page * pointers.
 * These can be kmap'd by the caller for CPU access.
 *
 * The caller must perform iommufd_access_unpin_pages() when done to balance
 * this.
 *
 * This API always requires a page aligned iova. This happens naturally if the
 * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
 * smaller alignments have corner cases where this API can fail on otherwise
 * aligned iova.
 */
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
			     unsigned long length, struct page **out_pages,
			     unsigned int flags)
{
	struct io_pagetable *iopt = &access->ioas->iopt;
	struct iopt_area_contig_iter iter;
	unsigned long last_iova;
	struct iopt_area *area;
	int rc;

	/* Driver's ops don't support pin_pages */
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
	    WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
		return -EINVAL;

	if (!length)
		return -EINVAL;
	if (check_add_overflow(iova, length - 1, &last_iova))
		return -EOVERFLOW;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
		unsigned long last = min(last_iova, iopt_area_last_iova(area));
		unsigned long last_index = iopt_area_iova_to_index(area, last);
		unsigned long index =
			iopt_area_iova_to_index(area, iter.cur_iova);

		if (area->prevent_access ||
		    !iopt_area_contig_is_aligned(&iter)) {
			rc = -EINVAL;
			goto err_remove;
		}

		if (!check_area_prot(area, flags)) {
			rc = -EPERM;
			goto err_remove;
		}

		rc = iopt_area_add_access(area, index, last_index, out_pages,
					  flags);
		if (rc)
			goto err_remove;
		out_pages += last_index - index + 1;
	}
	if (!iopt_area_contig_done(&iter)) {
		rc = -ENOENT;
		goto err_remove;
	}

	up_read(&iopt->iova_rwsem);
	return 0;

err_remove:
	if (iova < iter.cur_iova) {
		last_iova = iter.cur_iova - 1;
		iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
			iopt_area_remove_access(
				area,
				iopt_area_iova_to_index(area, iter.cur_iova),
				iopt_area_iova_to_index(
					area, min(last_iova,
						  iopt_area_last_iova(area))));
	}
	up_read(&iopt->iova_rwsem);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
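
/*
 * Example (editor's sketch, not part of this file): pinning a single page,
 * touching it through the CPU, and balancing with unpin. The helper name is
 * hypothetical; the flag names are the IOMMUFD_ACCESS_RW_* values from
 * linux/iommufd.h.
 *
 *	static int example_read_byte(struct iommufd_access *access,
 *				     unsigned long iova, u8 *val)
 *	{
 *		unsigned long page_iova = ALIGN_DOWN(iova, PAGE_SIZE);
 *		struct page *page;
 *		void *vaddr;
 *		int rc;
 *
 *		rc = iommufd_access_pin_pages(access, page_iova, PAGE_SIZE,
 *					      &page, IOMMUFD_ACCESS_RW_READ);
 *		if (rc)
 *			return rc;
 *
 *		vaddr = kmap_local_page(page);
 *		*val = *((u8 *)vaddr + offset_in_page(iova));
 *		kunmap_local(vaddr);
 *
 *		iommufd_access_unpin_pages(access, page_iova, PAGE_SIZE);
 *		return 0;
 *	}
 */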

/**
 * iommufd_access_rw - Read or write data under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @data: Kernel buffer to copy to/from
 * @length: Number of bytes to access
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Copy kernel to/from data into the range given by IOVA/length. If flags
 * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
 * by changing it into copy_to/from_user().
 */
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
		      void *data, size_t length, unsigned int flags)
{
	struct io_pagetable *iopt = &access->ioas->iopt;
	struct iopt_area_contig_iter iter;
	struct iopt_area *area;
	unsigned long last_iova;
	int rc;

	if (!length)
		return -EINVAL;
	if (check_add_overflow(iova, length - 1, &last_iova))
		return -EOVERFLOW;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
		unsigned long last = min(last_iova, iopt_area_last_iova(area));
		unsigned long bytes = (last - iter.cur_iova) + 1;

		if (area->prevent_access) {
			rc = -EINVAL;
			goto err_out;
		}

		if (!check_area_prot(area, flags)) {
			rc = -EPERM;
			goto err_out;
		}

		rc = iopt_pages_rw_access(
			area->pages, iopt_area_start_byte(area, iter.cur_iova),
			data, bytes, flags);
		if (rc)
			goto err_out;
		data += bytes;
	}
	if (!iopt_area_contig_done(&iter))
		rc = -ENOENT;
err_out:
	up_read(&iopt->iova_rwsem);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
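
/*
 * Example (editor's sketch, not part of this file): simple copy helpers built
 * on iommufd_access_rw(). The names are hypothetical; a caller running from a
 * kthread would OR in IOMMUFD_ACCESS_RW_KTHREAD as described above.
 *
 *	static int example_read_guest(struct iommufd_access *access,
 *				      unsigned long iova, void *buf, size_t len)
 *	{
 *		return iommufd_access_rw(access, iova, buf, len,
 *					 IOMMUFD_ACCESS_RW_READ);
 *	}
 *
 *	static int example_write_guest(struct iommufd_access *access,
 *				       unsigned long iova, void *buf, size_t len)
 *	{
 *		return iommufd_access_rw(access, iova, buf, len,
 *					 IOMMUFD_ACCESS_RW_WRITE);
 *	}
 */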