// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt) "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);

enum {
	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
#ifdef CONFIG_CDX_BUS
	&cdx_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				" (set via kernel command line)" : "");
214 " (set via kernel command line)" : ""); 215 216 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 217 if (!nb) 218 return -ENOMEM; 219 220 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 221 nb[i].notifier_call = iommu_bus_notifier; 222 bus_register_notifier(iommu_buses[i], &nb[i]); 223 } 224 225 return 0; 226 } 227 subsys_initcall(iommu_subsys_init); 228 229 static int remove_iommu_group(struct device *dev, void *data) 230 { 231 if (dev->iommu && dev->iommu->iommu_dev == data) 232 iommu_release_device(dev); 233 234 return 0; 235 } 236 237 /** 238 * iommu_device_register() - Register an IOMMU hardware instance 239 * @iommu: IOMMU handle for the instance 240 * @ops: IOMMU ops to associate with the instance 241 * @hwdev: (optional) actual instance device, used for fwnode lookup 242 * 243 * Return: 0 on success, or an error. 244 */ 245 int iommu_device_register(struct iommu_device *iommu, 246 const struct iommu_ops *ops, struct device *hwdev) 247 { 248 int err = 0; 249 250 /* We need to be able to take module references appropriately */ 251 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 252 return -EINVAL; 253 /* 254 * Temporarily enforce global restriction to a single driver. This was 255 * already the de-facto behaviour, since any possible combination of 256 * existing drivers would compete for at least the PCI or platform bus. 257 */ 258 if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) 259 return -EBUSY; 260 261 iommu->ops = ops; 262 if (hwdev) 263 iommu->fwnode = dev_fwnode(hwdev); 264 265 spin_lock(&iommu_device_lock); 266 list_add_tail(&iommu->list, &iommu_device_list); 267 spin_unlock(&iommu_device_lock); 268 269 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { 270 iommu_buses[i]->iommu_ops = ops; 271 err = bus_iommu_probe(iommu_buses[i]); 272 } 273 if (err) 274 iommu_device_unregister(iommu); 275 return err; 276 } 277 EXPORT_SYMBOL_GPL(iommu_device_register); 278 279 void iommu_device_unregister(struct iommu_device *iommu) 280 { 281 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 282 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 283 284 spin_lock(&iommu_device_lock); 285 list_del(&iommu->list); 286 spin_unlock(&iommu_device_lock); 287 } 288 EXPORT_SYMBOL_GPL(iommu_device_unregister); 289 290 static struct dev_iommu *dev_iommu_get(struct device *dev) 291 { 292 struct dev_iommu *param = dev->iommu; 293 294 if (param) 295 return param; 296 297 param = kzalloc(sizeof(*param), GFP_KERNEL); 298 if (!param) 299 return NULL; 300 301 mutex_init(¶m->lock); 302 dev->iommu = param; 303 return param; 304 } 305 306 static void dev_iommu_free(struct device *dev) 307 { 308 struct dev_iommu *param = dev->iommu; 309 310 dev->iommu = NULL; 311 if (param->fwspec) { 312 fwnode_handle_put(param->fwspec->iommu_fwnode); 313 kfree(param->fwspec); 314 } 315 kfree(param); 316 } 317 318 static u32 dev_iommu_get_max_pasids(struct device *dev) 319 { 320 u32 max_pasids = 0, bits = 0; 321 int ret; 322 323 if (dev_is_pci(dev)) { 324 ret = pci_max_pasids(to_pci_dev(dev)); 325 if (ret > 0) 326 max_pasids = ret; 327 } else { 328 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); 329 if (!ret) 330 max_pasids = 1UL << bits; 331 } 332 333 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); 334 } 335 336 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 337 { 338 const struct iommu_ops *ops = dev->bus->iommu_ops; 339 struct iommu_device *iommu_dev; 340 struct iommu_group 
static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	static DEFINE_MUTEX(iommu_probe_device_lock);
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	mutex_lock(&iommu_probe_device_lock);
	if (!dev_iommu_get(dev)) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}

	mutex_lock(&group->mutex);
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	mutex_unlock(&iommu_probe_device_lock);
	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

err_unlock:
	mutex_unlock(&iommu_probe_device_lock);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	mutex_lock(&group->mutex);

	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);

	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_unlock;
	} else if (!group->default_domain) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_unlock;
	}

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}
/*
 * Remove a device from a group's device list and return the group device
 * if successful.
 */
static struct group_device *
__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *device;

	lockdep_assert_held(&group->mutex);
	for_each_group_device(group, device) {
		if (device->dev == dev) {
			list_del(&device->list);
			return device;
		}
	}

	return NULL;
}

/*
 * Release a device from its group and decrement the iommu group reference
 * count.
 */
static void __iommu_group_release_device(struct iommu_group *group,
					 struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(grp_dev->name);
	kfree(grp_dev);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;

	if (!dev->iommu || !group)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);

	/*
	 * If the group has become empty then ownership must have been
	 * released, and the current domain must be set back to NULL or
	 * the default domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
	 *
	 * The IOMMU driver must set the device to either an identity or
	 * blocking translation and stop using any domain pointer, as it is
	 * going to be freed.
	 */
	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);

	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}
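/*
 * Example (informative): the early parameters handled above are given on
 * the kernel command line, e.g.
 *
 *	iommu.passthrough=1	use identity (passthrough) default domains
 *	iommu.strict=0		use lazy (deferred) TLB invalidation for
 *				DMA domains
 */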
static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sysfs_emit(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
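/*
 * Illustrative sketch (not part of this file): a caller snapshotting a
 * group's reserved regions. The caller owns the returned entries and must
 * free them when done.
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region, *next;
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry_safe(region, next, &resv_regions, list) {
 *		...reserve region->start .. region->start + region->length - 1...
 *		list_del(&region->list);
 *		kfree(region);
 *	}
 */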
static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	int offset = 0;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return offset;
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static const struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};
/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
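/*
 * Illustrative sketch (not part of this file): a driver's device_group
 * callback allocating a fresh group and hanging driver-private data off it.
 * mydrv_group_data and mydrv_release_group_data are hypothetical.
 *
 *	struct iommu_group *mydrv_device_group(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_alloc();
 *		struct mydrv_group_data *data;
 *
 *		if (IS_ERR(group))
 *			return group;
 *
 *		data = kzalloc(sizeof(*data), GFP_KERNEL);
 *		if (data)
 *			iommu_group_set_iommudata(group, data,
 *						  mydrv_release_group_data);
 *		return group;
 *	}
 */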
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}
	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}
/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);
	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
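/*
 * Illustrative sketch (not part of this file): counting the devices in a
 * group via iommu_group_for_each_dev(). The callback name is hypothetical;
 * a nonzero return from the callback stops the iteration.
 *
 *	static int mydrv_count_device(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, mydrv_count_device);
 */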
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
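/*
 * Illustrative sketch (not part of this file): registering a fault handler.
 * mydrv_iopf_handler and mydrv_ctx are hypothetical; the handler must
 * return 0 on success and, for recoverable page requests, later complete
 * the fault with iommu_page_response().
 *
 *	static int mydrv_iopf_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct mydrv_ctx *ctx = data;
 *
 *		...queue the fault for deferred handling...
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, mydrv_iopf_handler, ctx);
 */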
/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
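/*
 * Illustrative sketch (not part of this file): completing a recoverable
 * page request from a fault handler's deferred work. Fields not shown
 * keep their zero defaults.
 *
 *	struct iommu_page_response msg = {
 *		.version	= IOMMU_PAGE_RESP_VERSION_1,
 *		.grpid		= fault->prm.grpid,
 *		.pasid		= fault->prm.pasid,
 *		.flags		= IOMMU_PAGE_RESP_PASID_VALID,
 *		.code		= IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	ret = iommu_page_response(dev, &msg);
 */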
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
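/*
 * Illustrative sketch (not part of this file): an IOMMU driver normally
 * wires these helpers into its ops rather than open-coding group lookup;
 * mydrv_ops is hypothetical.
 *
 *	static const struct iommu_ops mydrv_ops = {
 *		...
 *		.device_group = pci_device_group,
 *	};
 *
 * Drivers whose devices sit on the platform bus would use
 * generic_device_group() (or their own callback) instead.
 */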
/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static struct iommu_domain *
__iommu_group_alloc_default_domain(const struct bus_type *bus,
				   struct iommu_group *group, int req_type)
{
	if (group->default_domain && group->default_domain->type == req_type)
		return group->default_domain;
	return __iommu_domain_alloc(bus, req_type);
}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	const struct bus_type *bus =
		list_first_entry(&group->devices, struct group_device, list)
			->dev->bus;
	struct iommu_domain *dom;

	lockdep_assert_held(&group->mutex);

	if (req_type)
		return __iommu_group_alloc_default_domain(bus, group, req_type);

	/* The driver gave no guidance on what type to use, try the default */
	dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
	if (dom)
		return dom;

	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
		return NULL;
	dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
	if (!dom)
		return NULL;

	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
		iommu_def_domain_type, group->name);
	return dom;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

/* A target_type of 0 will select the best domain type and cannot fail */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	int best_type = target_type;
	struct group_device *gdev;
	struct device *last_dev;

	lockdep_assert_held(&group->mutex);

	for_each_group_device(group, gdev) {
		unsigned int type = iommu_get_def_domain_type(gdev->dev);

		if (best_type && type && best_type != type) {
			if (target_type) {
				dev_err_ratelimited(
					gdev->dev,
					"Device cannot be in %s domain\n",
					iommu_domain_type_str(target_type));
				return -1;
			}

			dev_warn(
				gdev->dev,
				"Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				iommu_domain_type_str(type), dev_name(last_dev),
				iommu_domain_type_str(best_type));
			return 0;
		}
		if (!best_type)
			best_type = type;
		last_dev = gdev->dev;
	}
	return best_type;
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		mutex_unlock(&group->mutex);
		__iommu_group_dma_finalize(group);
	}

	return 0;
}

bool iommu_present(const struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);

/**
 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
 *       for a group
 * @group: Group to query
 *
 * IOMMU groups should not have differing values of
 * msi_device_has_isolated_msi() for devices in a group. However nothing
 * directly prevents this, so ensure mistakes don't result in isolation failures
 * by checking that all the devices are the same.
 */
bool iommu_group_has_isolated_msi(struct iommu_group *group)
{
	struct group_device *group_dev;
	bool ret = true;

	mutex_lock(&group->mutex);
	for_each_group_device(group, group_dev)
		ret &= msi_device_has_isolated_msi(group_dev->dev);
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);
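/*
 * Illustrative sketch (not part of this file): a consumer such as a
 * VFIO-like driver gating features on these queries;
 * allow_unsafe_interrupts stands in for a driver-specific opt-in knob.
 *
 *	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		return -EINVAL;
 *
 *	if (!iommu_group_has_isolated_msi(group) && !allow_unsafe_interrupts)
 *		return -EPERM;
 */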
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;
	unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(alloc_type);
	if (!domain)
		return NULL;

	domain->type = type;
	/*
	 * If not already set, assume all sizes by default; the driver
	 * may override this later
	 */
	if (!domain->pgsize_bitmap)
		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	iommu_put_dma_cookie(domain);
	domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
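/*
 * Illustrative sketch (not part of this file): allocating an unmanaged
 * domain for PCI devices and releasing it again.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	...set up mappings with iommu_map(), attach devices...
 *	iommu_domain_free(domain);
 */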
2007 */ 2008 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2009 { 2010 struct iommu_group *group; 2011 int ret; 2012 2013 group = iommu_group_get(dev); 2014 if (!group) 2015 return -ENODEV; 2016 2017 /* 2018 * Lock the group to make sure the device-count doesn't 2019 * change while we are attaching 2020 */ 2021 mutex_lock(&group->mutex); 2022 ret = -EINVAL; 2023 if (list_count_nodes(&group->devices) != 1) 2024 goto out_unlock; 2025 2026 ret = __iommu_attach_group(domain, group); 2027 2028 out_unlock: 2029 mutex_unlock(&group->mutex); 2030 iommu_group_put(group); 2031 2032 return ret; 2033 } 2034 EXPORT_SYMBOL_GPL(iommu_attach_device); 2035 2036 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2037 { 2038 if (dev->iommu && dev->iommu->attach_deferred) 2039 return __iommu_attach_device(domain, dev); 2040 2041 return 0; 2042 } 2043 2044 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2045 { 2046 struct iommu_group *group; 2047 2048 group = iommu_group_get(dev); 2049 if (!group) 2050 return; 2051 2052 mutex_lock(&group->mutex); 2053 if (WARN_ON(domain != group->domain) || 2054 WARN_ON(list_count_nodes(&group->devices) != 1)) 2055 goto out_unlock; 2056 __iommu_group_set_core_domain(group); 2057 2058 out_unlock: 2059 mutex_unlock(&group->mutex); 2060 iommu_group_put(group); 2061 } 2062 EXPORT_SYMBOL_GPL(iommu_detach_device); 2063 2064 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2065 { 2066 struct iommu_domain *domain; 2067 struct iommu_group *group; 2068 2069 group = iommu_group_get(dev); 2070 if (!group) 2071 return NULL; 2072 2073 domain = group->domain; 2074 2075 iommu_group_put(group); 2076 2077 return domain; 2078 } 2079 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2080 2081 /* 2082 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2083 * guarantees that the group and its default domain are valid and correct. 2084 */ 2085 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2086 { 2087 return dev->iommu_group->default_domain; 2088 } 2089 2090 static int __iommu_attach_group(struct iommu_domain *domain, 2091 struct iommu_group *group) 2092 { 2093 if (group->domain && group->domain != group->default_domain && 2094 group->domain != group->blocking_domain) 2095 return -EBUSY; 2096 2097 return __iommu_group_set_domain(group, domain); 2098 } 2099 2100 /** 2101 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2102 * @domain: IOMMU domain to attach 2103 * @group: IOMMU group that will be attached 2104 * 2105 * Returns 0 on success and error code on failure 2106 * 2107 * Note that EINVAL can be treated as a soft failure, indicating 2108 * that certain configuration of the domain is incompatible with 2109 * the group. In this case attaching a different domain to the 2110 * group may succeed. 
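 *
 * Example of the attach/detach pairing (sketch, assuming the caller already
 * holds a reference on @group):
 *
 *	ret = iommu_attach_group(domain, group);
 *	if (ret)
 *		return ret;
 *	... issue DMA through @domain ...
 *	iommu_detach_group(domain, group);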
2111 */ 2112 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2113 { 2114 int ret; 2115 2116 mutex_lock(&group->mutex); 2117 ret = __iommu_attach_group(domain, group); 2118 mutex_unlock(&group->mutex); 2119 2120 return ret; 2121 } 2122 EXPORT_SYMBOL_GPL(iommu_attach_group); 2123 2124 static int __iommu_device_set_domain(struct iommu_group *group, 2125 struct device *dev, 2126 struct iommu_domain *new_domain, 2127 unsigned int flags) 2128 { 2129 int ret; 2130 2131 if (dev->iommu->attach_deferred) { 2132 if (new_domain == group->default_domain) 2133 return 0; 2134 dev->iommu->attach_deferred = 0; 2135 } 2136 2137 ret = __iommu_attach_device(new_domain, dev); 2138 if (ret) { 2139 /* 2140 * If we have a blocking domain then try to attach that in hopes 2141 * of avoiding a UAF. Modern drivers should implement blocking 2142 * domains as global statics that cannot fail. 2143 */ 2144 if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) && 2145 group->blocking_domain && 2146 group->blocking_domain != new_domain) 2147 __iommu_attach_device(group->blocking_domain, dev); 2148 return ret; 2149 } 2150 return 0; 2151 } 2152 2153 /* 2154 * If 0 is returned the group's domain is new_domain. If an error is returned 2155 * then the group's domain will be set back to the existing domain unless 2156 * IOMMU_SET_DOMAIN_MUST_SUCCEED is set, in which case the error is returned 2157 * and the group's domain is left inconsistent. It is a driver bug to fail 2158 * attach with a previously good domain. We try to avoid a kernel UAF because of this. 2159 * 2160 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU 2161 * API works on domains and devices. Bridge that gap by iterating over the 2162 * devices in a group. Ideally we'd have a single device which represents the 2163 * requestor ID of the group, but we also allow IOMMU drivers to create policy 2164 * defined minimum sets, where the physical hardware may be able to distinguish 2165 * members, but we wish to group them at a higher level (e.g. untrusted 2166 * multi-function PCI devices). Thus we attach each device. 2167 */ 2168 static int __iommu_group_set_domain_internal(struct iommu_group *group, 2169 struct iommu_domain *new_domain, 2170 unsigned int flags) 2171 { 2172 struct group_device *last_gdev; 2173 struct group_device *gdev; 2174 int result; 2175 int ret; 2176 2177 lockdep_assert_held(&group->mutex); 2178 2179 if (group->domain == new_domain) 2180 return 0; 2181 2182 /* 2183 * New drivers should support default domains, so the set_platform_dma_ops() 2184 * op will never be called. Otherwise the NULL domain represents some 2185 * platform specific behavior. 2186 */ 2187 if (!new_domain) { 2188 for_each_group_device(group, gdev) { 2189 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev); 2190 2191 if (!WARN_ON(!ops->set_platform_dma_ops)) 2192 ops->set_platform_dma_ops(gdev->dev); 2193 } 2194 group->domain = NULL; 2195 return 0; 2196 } 2197 2198 /* 2199 * Changing the domain is done by calling attach_dev() on the new 2200 * domain. This switch does not have to be atomic and DMA can be 2201 * discarded during the transition. DMA must only be able to access 2202 * either new_domain or group->domain, never something else. 2203 */ 2204 result = 0; 2205 for_each_group_device(group, gdev) { 2206 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, 2207 flags); 2208 if (ret) { 2209 result = ret; 2210 /* 2211 * Keep trying the other devices in the group.
If a 2212 * driver fails to attach to an otherwise good domain, and 2213 * does not support blocking domains, it should at least 2214 * drop its reference on the current domain so we don't 2215 * UAF. 2216 */ 2217 if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) 2218 continue; 2219 goto err_revert; 2220 } 2221 } 2222 group->domain = new_domain; 2223 return result; 2224 2225 err_revert: 2226 /* 2227 * This is called in error unwind paths. A well behaved driver should 2228 * always allow us to attach to a domain that was already attached. 2229 */ 2230 last_gdev = gdev; 2231 for_each_group_device(group, gdev) { 2232 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev); 2233 2234 /* 2235 * If set_platform_dma_ops is not present a NULL domain can 2236 * happen only for the first probe, in which case we leave 2237 * group->domain as NULL and let release clean everything up. 2238 */ 2239 if (group->domain) 2240 WARN_ON(__iommu_device_set_domain( 2241 group, gdev->dev, group->domain, 2242 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2243 else if (ops->set_platform_dma_ops) 2244 ops->set_platform_dma_ops(gdev->dev); 2245 if (gdev == last_gdev) 2246 break; 2247 } 2248 return ret; 2249 } 2250 2251 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2252 { 2253 mutex_lock(&group->mutex); 2254 __iommu_group_set_core_domain(group); 2255 mutex_unlock(&group->mutex); 2256 } 2257 EXPORT_SYMBOL_GPL(iommu_detach_group); 2258 2259 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2260 { 2261 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2262 return iova; 2263 2264 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2265 return 0; 2266 2267 return domain->ops->iova_to_phys(domain, iova); 2268 } 2269 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2270 2271 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2272 phys_addr_t paddr, size_t size, size_t *count) 2273 { 2274 unsigned int pgsize_idx, pgsize_idx_next; 2275 unsigned long pgsizes; 2276 size_t offset, pgsize, pgsize_next; 2277 unsigned long addr_merge = paddr | iova; 2278 2279 /* Page sizes supported by the hardware and small enough for @size */ 2280 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2281 2282 /* Constrain the page sizes further based on the maximum alignment */ 2283 if (likely(addr_merge)) 2284 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2285 2286 /* Make sure we have at least one suitable page size */ 2287 BUG_ON(!pgsizes); 2288 2289 /* Pick the biggest page size remaining */ 2290 pgsize_idx = __fls(pgsizes); 2291 pgsize = BIT(pgsize_idx); 2292 if (!count) 2293 return pgsize; 2294 2295 /* Find the next biggest supported page size, if it exists */ 2296 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2297 if (!pgsizes) 2298 goto out_set_count; 2299 2300 pgsize_idx_next = __ffs(pgsizes); 2301 pgsize_next = BIT(pgsize_idx_next); 2302 2303 /* 2304 * There's no point trying a bigger page size unless the virtual 2305 * and physical addresses are similarly offset within the larger page. 2306 */ 2307 if ((iova ^ paddr) & (pgsize_next - 1)) 2308 goto out_set_count; 2309 2310 /* Calculate the offset to the next page size alignment boundary */ 2311 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2312 2313 /* 2314 * If size is big enough to accommodate the larger page, reduce 2315 * the number of smaller pages.
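 *
 * Worked example (illustrative numbers, assuming 4K and 2M page support):
 * mapping 3M at iova = paddr = 1M yields pgsize = 4K and pgsize_next = 2M,
 * so offset = 1M. Since offset + 2M <= 3M, size is clamped to 1M and count
 * becomes 256: this call covers only the 4K pages up to the 2M boundary,
 * letting the next call use a single 2M page.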
2316 */ 2317 if (offset + pgsize_next <= size) 2318 size = offset; 2319 2320 out_set_count: 2321 *count = size >> pgsize_idx; 2322 return pgsize; 2323 } 2324 2325 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2326 phys_addr_t paddr, size_t size, int prot, 2327 gfp_t gfp, size_t *mapped) 2328 { 2329 const struct iommu_domain_ops *ops = domain->ops; 2330 size_t pgsize, count; 2331 int ret; 2332 2333 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2334 2335 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2336 iova, &paddr, pgsize, count); 2337 2338 if (ops->map_pages) { 2339 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2340 gfp, mapped); 2341 } else { 2342 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2343 *mapped = ret ? 0 : pgsize; 2344 } 2345 2346 return ret; 2347 } 2348 2349 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2350 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2351 { 2352 const struct iommu_domain_ops *ops = domain->ops; 2353 unsigned long orig_iova = iova; 2354 unsigned int min_pagesz; 2355 size_t orig_size = size; 2356 phys_addr_t orig_paddr = paddr; 2357 int ret = 0; 2358 2359 if (unlikely(!(ops->map || ops->map_pages) || 2360 domain->pgsize_bitmap == 0UL)) 2361 return -ENODEV; 2362 2363 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2364 return -EINVAL; 2365 2366 /* find out the minimum page size supported */ 2367 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2368 2369 /* 2370 * both the virtual address and the physical one, as well as 2371 * the size of the mapping, must be aligned (at least) to the 2372 * size of the smallest page supported by the hardware 2373 */ 2374 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2375 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2376 iova, &paddr, size, min_pagesz); 2377 return -EINVAL; 2378 } 2379 2380 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2381 2382 while (size) { 2383 size_t mapped = 0; 2384 2385 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2386 &mapped); 2387 /* 2388 * Some pages may have been mapped, even if an error occurred, 2389 * so we should account for those so they can be unmapped. 
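 * For example (illustrative numbers), if 8M was requested and the call
 * failed after mapping 2M, 'size' still shrinks by 2M, so the unroll
 * path below unmaps exactly the 2M that reached the page table.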
2390 */ 2391 size -= mapped; 2392 2393 if (ret) 2394 break; 2395 2396 iova += mapped; 2397 paddr += mapped; 2398 } 2399 2400 /* unroll mapping in case something went wrong */ 2401 if (ret) 2402 iommu_unmap(domain, orig_iova, orig_size - size); 2403 else 2404 trace_map(orig_iova, orig_paddr, orig_size); 2405 2406 return ret; 2407 } 2408 2409 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2410 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2411 { 2412 const struct iommu_domain_ops *ops = domain->ops; 2413 int ret; 2414 2415 might_sleep_if(gfpflags_allow_blocking(gfp)); 2416 2417 /* Discourage passing strange GFP flags */ 2418 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2419 __GFP_HIGHMEM))) 2420 return -EINVAL; 2421 2422 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2423 if (ret == 0 && ops->iotlb_sync_map) 2424 ops->iotlb_sync_map(domain, iova, size); 2425 2426 return ret; 2427 } 2428 EXPORT_SYMBOL_GPL(iommu_map); 2429 2430 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2431 unsigned long iova, size_t size, 2432 struct iommu_iotlb_gather *iotlb_gather) 2433 { 2434 const struct iommu_domain_ops *ops = domain->ops; 2435 size_t pgsize, count; 2436 2437 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2438 return ops->unmap_pages ? 2439 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2440 ops->unmap(domain, iova, pgsize, iotlb_gather); 2441 } 2442 2443 static size_t __iommu_unmap(struct iommu_domain *domain, 2444 unsigned long iova, size_t size, 2445 struct iommu_iotlb_gather *iotlb_gather) 2446 { 2447 const struct iommu_domain_ops *ops = domain->ops; 2448 size_t unmapped_page, unmapped = 0; 2449 unsigned long orig_iova = iova; 2450 unsigned int min_pagesz; 2451 2452 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2453 domain->pgsize_bitmap == 0UL)) 2454 return 0; 2455 2456 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2457 return 0; 2458 2459 /* find out the minimum page size supported */ 2460 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2461 2462 /* 2463 * The virtual address, as well as the size of the mapping, must be 2464 * aligned (at least) to the size of the smallest page supported 2465 * by the hardware 2466 */ 2467 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2468 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2469 iova, size, min_pagesz); 2470 return 0; 2471 } 2472 2473 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2474 2475 /* 2476 * Keep iterating until we either unmap 'size' bytes (or more) 2477 * or we hit an area that isn't mapped. 
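 * For example, if the driver can only unmap one 2M chunk per call, an 8M
 * unmap takes several iterations; if a chunk turns out not to be mapped,
 * the driver returns 0 and the loop below stops early.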
2478 */ 2479 while (unmapped < size) { 2480 unmapped_page = __iommu_unmap_pages(domain, iova, 2481 size - unmapped, 2482 iotlb_gather); 2483 if (!unmapped_page) 2484 break; 2485 2486 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2487 iova, unmapped_page); 2488 2489 iova += unmapped_page; 2490 unmapped += unmapped_page; 2491 } 2492 2493 trace_unmap(orig_iova, size, unmapped); 2494 return unmapped; 2495 } 2496 2497 size_t iommu_unmap(struct iommu_domain *domain, 2498 unsigned long iova, size_t size) 2499 { 2500 struct iommu_iotlb_gather iotlb_gather; 2501 size_t ret; 2502 2503 iommu_iotlb_gather_init(&iotlb_gather); 2504 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2505 iommu_iotlb_sync(domain, &iotlb_gather); 2506 2507 return ret; 2508 } 2509 EXPORT_SYMBOL_GPL(iommu_unmap); 2510 2511 size_t iommu_unmap_fast(struct iommu_domain *domain, 2512 unsigned long iova, size_t size, 2513 struct iommu_iotlb_gather *iotlb_gather) 2514 { 2515 return __iommu_unmap(domain, iova, size, iotlb_gather); 2516 } 2517 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2518 2519 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2520 struct scatterlist *sg, unsigned int nents, int prot, 2521 gfp_t gfp) 2522 { 2523 const struct iommu_domain_ops *ops = domain->ops; 2524 size_t len = 0, mapped = 0; 2525 phys_addr_t start; 2526 unsigned int i = 0; 2527 int ret; 2528 2529 might_sleep_if(gfpflags_allow_blocking(gfp)); 2530 2531 /* Discourage passing strange GFP flags */ 2532 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2533 __GFP_HIGHMEM))) 2534 return -EINVAL; 2535 2536 while (i <= nents) { 2537 phys_addr_t s_phys = sg_phys(sg); 2538 2539 if (len && s_phys != start + len) { 2540 ret = __iommu_map(domain, iova + mapped, start, 2541 len, prot, gfp); 2542 2543 if (ret) 2544 goto out_err; 2545 2546 mapped += len; 2547 len = 0; 2548 } 2549 2550 if (sg_is_dma_bus_address(sg)) 2551 goto next; 2552 2553 if (len) { 2554 len += sg->length; 2555 } else { 2556 len = sg->length; 2557 start = s_phys; 2558 } 2559 2560 next: 2561 if (++i < nents) 2562 sg = sg_next(sg); 2563 } 2564 2565 if (ops->iotlb_sync_map) 2566 ops->iotlb_sync_map(domain, iova, mapped); 2567 return mapped; 2568 2569 out_err: 2570 /* undo mappings already done */ 2571 iommu_unmap(domain, iova, mapped); 2572 2573 return ret; 2574 } 2575 EXPORT_SYMBOL_GPL(iommu_map_sg); 2576 2577 /** 2578 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2579 * @domain: the iommu domain where the fault has happened 2580 * @dev: the device where the fault has happened 2581 * @iova: the faulting address 2582 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2583 * 2584 * This function should be called by the low-level IOMMU implementations 2585 * whenever IOMMU faults happen, to allow high-level users, that are 2586 * interested in such events, to know about them. 2587 * 2588 * This event may be useful for several possible use cases: 2589 * - mere logging of the event 2590 * - dynamic TLB/PTE loading 2591 * - if restarting of the faulting device is required 2592 * 2593 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2594 * PTE/TLB loading will one day be supported, implementations will be able 2595 * to tell whether it succeeded or not according to this return value). 
2596 * 2597 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2598 * (though fault handlers can also return -ENOSYS, in case they want to 2599 * elicit the default behavior of the IOMMU drivers). 2600 */ 2601 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2602 unsigned long iova, int flags) 2603 { 2604 int ret = -ENOSYS; 2605 2606 /* 2607 * If upper layers showed interest and installed a fault handler, 2608 * invoke it. 2609 */ 2610 if (domain->handler) 2611 ret = domain->handler(domain, dev, iova, flags, 2612 domain->handler_token); 2613 2614 trace_io_page_fault(dev, iova, flags); 2615 return ret; 2616 } 2617 EXPORT_SYMBOL_GPL(report_iommu_fault); 2618 2619 static int __init iommu_init(void) 2620 { 2621 iommu_group_kset = kset_create_and_add("iommu_groups", 2622 NULL, kernel_kobj); 2623 BUG_ON(!iommu_group_kset); 2624 2625 iommu_debugfs_setup(); 2626 2627 return 0; 2628 } 2629 core_initcall(iommu_init); 2630 2631 int iommu_enable_nesting(struct iommu_domain *domain) 2632 { 2633 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2634 return -EINVAL; 2635 if (!domain->ops->enable_nesting) 2636 return -EINVAL; 2637 return domain->ops->enable_nesting(domain); 2638 } 2639 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2640 2641 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2642 unsigned long quirk) 2643 { 2644 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2645 return -EINVAL; 2646 if (!domain->ops->set_pgtable_quirks) 2647 return -EINVAL; 2648 return domain->ops->set_pgtable_quirks(domain, quirk); 2649 } 2650 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2651 2652 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2653 { 2654 const struct iommu_ops *ops = dev_iommu_ops(dev); 2655 2656 if (ops->get_resv_regions) 2657 ops->get_resv_regions(dev, list); 2658 } 2659 2660 /** 2661 * iommu_put_resv_regions - release reserved regions 2662 * @dev: device for which to free reserved regions 2663 * @list: reserved region list for device 2664 * 2665 * This releases a reserved region list acquired by iommu_get_resv_regions().
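 *
 * Example of the expected get/put pairing (sketch):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		... inspect region->start, region->length, region->type ...
 *	iommu_put_resv_regions(dev, &resv_regions);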
2666 */ 2667 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2668 { 2669 struct iommu_resv_region *entry, *next; 2670 2671 list_for_each_entry_safe(entry, next, list, list) { 2672 if (entry->free) 2673 entry->free(dev, entry); 2674 else 2675 kfree(entry); 2676 } 2677 } 2678 EXPORT_SYMBOL(iommu_put_resv_regions); 2679 2680 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2681 size_t length, int prot, 2682 enum iommu_resv_type type, 2683 gfp_t gfp) 2684 { 2685 struct iommu_resv_region *region; 2686 2687 region = kzalloc(sizeof(*region), gfp); 2688 if (!region) 2689 return NULL; 2690 2691 INIT_LIST_HEAD(&region->list); 2692 region->start = start; 2693 region->length = length; 2694 region->prot = prot; 2695 region->type = type; 2696 return region; 2697 } 2698 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2699 2700 void iommu_set_default_passthrough(bool cmd_line) 2701 { 2702 if (cmd_line) 2703 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2704 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2705 } 2706 2707 void iommu_set_default_translated(bool cmd_line) 2708 { 2709 if (cmd_line) 2710 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2711 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2712 } 2713 2714 bool iommu_default_passthrough(void) 2715 { 2716 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2717 } 2718 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2719 2720 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2721 { 2722 const struct iommu_ops *ops = NULL; 2723 struct iommu_device *iommu; 2724 2725 spin_lock(&iommu_device_lock); 2726 list_for_each_entry(iommu, &iommu_device_list, list) 2727 if (iommu->fwnode == fwnode) { 2728 ops = iommu->ops; 2729 break; 2730 } 2731 spin_unlock(&iommu_device_lock); 2732 return ops; 2733 } 2734 2735 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2736 const struct iommu_ops *ops) 2737 { 2738 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2739 2740 if (fwspec) 2741 return ops == fwspec->ops ? 0 : -EINVAL; 2742 2743 if (!dev_iommu_get(dev)) 2744 return -ENOMEM; 2745 2746 /* Preallocate for the overwhelmingly common case of 1 ID */ 2747 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2748 if (!fwspec) 2749 return -ENOMEM; 2750 2751 of_node_get(to_of_node(iommu_fwnode)); 2752 fwspec->iommu_fwnode = iommu_fwnode; 2753 fwspec->ops = ops; 2754 dev_iommu_fwspec_set(dev, fwspec); 2755 return 0; 2756 } 2757 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2758 2759 void iommu_fwspec_free(struct device *dev) 2760 { 2761 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2762 2763 if (fwspec) { 2764 fwnode_handle_put(fwspec->iommu_fwnode); 2765 kfree(fwspec); 2766 dev_iommu_fwspec_set(dev, NULL); 2767 } 2768 } 2769 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2770 2771 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2772 { 2773 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2774 int i, new_num; 2775 2776 if (!fwspec) 2777 return -EINVAL; 2778 2779 new_num = fwspec->num_ids + num_ids; 2780 if (new_num > 1) { 2781 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2782 GFP_KERNEL); 2783 if (!fwspec) 2784 return -ENOMEM; 2785 2786 dev_iommu_fwspec_set(dev, fwspec); 2787 } 2788 2789 for (i = 0; i < num_ids; i++) 2790 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2791 2792 fwspec->num_ids = new_num; 2793 return 0; 2794 } 2795 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2796 2797 /* 2798 * Per device IOMMU features.
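 *
 * For example, a driver preparing to use SVA would typically pair the two
 * calls below like this (sketch):
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *	... bind process address spaces ...
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);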
2799 */ 2800 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2801 { 2802 if (dev->iommu && dev->iommu->iommu_dev) { 2803 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2804 2805 if (ops->dev_enable_feat) 2806 return ops->dev_enable_feat(dev, feat); 2807 } 2808 2809 return -ENODEV; 2810 } 2811 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2812 2813 /* 2814 * The device drivers should do the necessary cleanups before calling this. 2815 */ 2816 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2817 { 2818 if (dev->iommu && dev->iommu->iommu_dev) { 2819 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2820 2821 if (ops->dev_disable_feat) 2822 return ops->dev_disable_feat(dev, feat); 2823 } 2824 2825 return -EBUSY; 2826 } 2827 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2828 2829 /** 2830 * iommu_setup_default_domain - Set the default_domain for the group 2831 * @group: Group to change 2832 * @target_type: Domain type to set as the default_domain 2833 * 2834 * Allocate a default domain and set it as the current domain on the group. If 2835 * the group already has a default domain it will be changed to the target_type. 2836 * When target_type is 0 the default domain is selected based on driver and 2837 * system preferences. 2838 */ 2839 static int iommu_setup_default_domain(struct iommu_group *group, 2840 int target_type) 2841 { 2842 struct iommu_domain *old_dom = group->default_domain; 2843 struct group_device *gdev; 2844 struct iommu_domain *dom; 2845 int req_type; 2846 int ret; 2847 2848 lockdep_assert_held(&group->mutex); 2849 2850 req_type = iommu_get_default_domain_type(group, target_type); 2851 if (req_type < 0) 2852 return -EINVAL; 2853 2854 /* 2855 * There are still some drivers which don't support default domains, so 2856 * we ignore the failure and leave group->default_domain NULL. 2857 * 2858 * We assume that the iommu driver starts up the device in 2859 * 'set_platform_dma_ops' mode if it does not support default domains. 2860 */ 2861 dom = iommu_group_alloc_default_domain(group, req_type); 2862 if (!dom) { 2863 /* Once in default_domain mode we never leave */ 2864 if (group->default_domain) 2865 return -ENODEV; 2866 group->default_domain = NULL; 2867 return 0; 2868 } 2869 2870 if (group->default_domain == dom) 2871 return 0; 2872 2873 /* 2874 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 2875 * mapped before their device is attached, in order to guarantee 2876 * continuity with any FW activity 2877 */ 2878 for_each_group_device(group, gdev) 2879 iommu_create_device_direct_mappings(dom, gdev->dev); 2880 2881 /* We must set default_domain early for __iommu_device_set_domain */ 2882 group->default_domain = dom; 2883 if (!group->domain) { 2884 /* 2885 * Drivers are not allowed to fail the first domain attach. 2886 * The only way to recover from this is to fail attaching the 2887 * iommu driver and call ops->release_device. Put the domain 2888 * in group->default_domain so it is freed after. 
2889 */ 2890 ret = __iommu_group_set_domain_internal( 2891 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 2892 if (WARN_ON(ret)) 2893 goto out_free; 2894 } else { 2895 ret = __iommu_group_set_domain(group, dom); 2896 if (ret) { 2897 iommu_domain_free(dom); 2898 group->default_domain = old_dom; 2899 return ret; 2900 } 2901 } 2902 2903 out_free: 2904 if (old_dom) 2905 iommu_domain_free(old_dom); 2906 return ret; 2907 } 2908 2909 /* 2910 * Changing the default domain through sysfs requires the users to unbind the 2911 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 2912 * transition. Return failure if this isn't met. 2913 * 2914 * We need to consider the race between this and the device release path. 2915 * group->mutex is used here to guarantee that the device release path 2916 * will not be entered at the same time. 2917 */ 2918 static ssize_t iommu_group_store_type(struct iommu_group *group, 2919 const char *buf, size_t count) 2920 { 2921 int ret, req_type; 2922 2923 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2924 return -EACCES; 2925 2926 if (WARN_ON(!group) || !group->default_domain) 2927 return -EINVAL; 2928 2929 if (sysfs_streq(buf, "identity")) 2930 req_type = IOMMU_DOMAIN_IDENTITY; 2931 else if (sysfs_streq(buf, "DMA")) 2932 req_type = IOMMU_DOMAIN_DMA; 2933 else if (sysfs_streq(buf, "DMA-FQ")) 2934 req_type = IOMMU_DOMAIN_DMA_FQ; 2935 else if (sysfs_streq(buf, "auto")) 2936 req_type = 0; 2937 else 2938 return -EINVAL; 2939 2940 mutex_lock(&group->mutex); 2941 /* We can bring up a flush queue without tearing down the domain. */ 2942 if (req_type == IOMMU_DOMAIN_DMA_FQ && 2943 group->default_domain->type == IOMMU_DOMAIN_DMA) { 2944 ret = iommu_dma_init_fq(group->default_domain); 2945 if (!ret) 2946 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; 2947 mutex_unlock(&group->mutex); 2948 2949 return ret ?: count; 2950 } 2951 2952 /* Otherwise, ensure that a device exists and no driver is bound. */ 2953 if (list_empty(&group->devices) || group->owner_cnt) { 2954 mutex_unlock(&group->mutex); 2955 return -EPERM; 2956 } 2957 2958 ret = iommu_setup_default_domain(group, req_type); 2959 2960 /* 2961 * Release the mutex here because ops->probe_finalize() call-back of 2962 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 2963 * in turn might call back into IOMMU core code, where it tries to take 2964 * group->mutex, resulting in a deadlock. 2965 */ 2966 mutex_unlock(&group->mutex); 2967 2968 /* Make sure dma_ops is appropriately set */ 2969 if (!ret) 2970 __iommu_group_dma_finalize(group); 2971 2972 return ret ?: count; 2973 } 2974 2975 static bool iommu_is_default_domain(struct iommu_group *group) 2976 { 2977 if (group->domain == group->default_domain) 2978 return true; 2979 2980 /* 2981 * If the default domain was set to identity and it is still an identity 2982 * domain then we consider this a pass. This happens because of 2983 * amd_iommu_init_device() replacing the default identity domain with an 2984 * identity domain that has a different configuration for AMDGPU. 2985 */ 2986 if (group->default_domain && 2987 group->default_domain->type == IOMMU_DOMAIN_IDENTITY && 2988 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) 2989 return true; 2990 return false; 2991 } 2992 2993 /** 2994 * iommu_device_use_default_domain() - Device driver wants to handle device 2995 * DMA through the kernel DMA API. 2996 * @dev: The device. 2997 * 2998 * The device driver about to bind @dev wants to do DMA through the kernel 2999 * DMA API.
Return 0 if it is allowed, otherwise an error. 3000 */ 3001 int iommu_device_use_default_domain(struct device *dev) 3002 { 3003 struct iommu_group *group = iommu_group_get(dev); 3004 int ret = 0; 3005 3006 if (!group) 3007 return 0; 3008 3009 mutex_lock(&group->mutex); 3010 if (group->owner_cnt) { 3011 if (group->owner || !iommu_is_default_domain(group) || 3012 !xa_empty(&group->pasid_array)) { 3013 ret = -EBUSY; 3014 goto unlock_out; 3015 } 3016 } 3017 3018 group->owner_cnt++; 3019 3020 unlock_out: 3021 mutex_unlock(&group->mutex); 3022 iommu_group_put(group); 3023 3024 return ret; 3025 } 3026 3027 /** 3028 * iommu_device_unuse_default_domain() - Device driver stops handling device 3029 * DMA through the kernel DMA API. 3030 * @dev: The device. 3031 * 3032 * The device driver doesn't want to do DMA through the kernel DMA API anymore. 3033 * It must be called after iommu_device_use_default_domain(). 3034 */ 3035 void iommu_device_unuse_default_domain(struct device *dev) 3036 { 3037 struct iommu_group *group = iommu_group_get(dev); 3038 3039 if (!group) 3040 return; 3041 3042 mutex_lock(&group->mutex); 3043 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3044 group->owner_cnt--; 3045 3046 mutex_unlock(&group->mutex); 3047 iommu_group_put(group); 3048 } 3049 3050 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3051 { 3052 struct group_device *dev = 3053 list_first_entry(&group->devices, struct group_device, list); 3054 3055 if (group->blocking_domain) 3056 return 0; 3057 3058 group->blocking_domain = 3059 __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED); 3060 if (!group->blocking_domain) { 3061 /* 3062 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED 3063 * create an empty domain instead. 3064 */ 3065 group->blocking_domain = __iommu_domain_alloc( 3066 dev->dev->bus, IOMMU_DOMAIN_UNMANAGED); 3067 if (!group->blocking_domain) 3068 return -EINVAL; 3069 } 3070 return 0; 3071 } 3072 3073 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3074 { 3075 int ret; 3076 3077 if ((group->domain && group->domain != group->default_domain) || 3078 !xa_empty(&group->pasid_array)) 3079 return -EBUSY; 3080 3081 ret = __iommu_group_alloc_blocking_domain(group); 3082 if (ret) 3083 return ret; 3084 ret = __iommu_group_set_domain(group, group->blocking_domain); 3085 if (ret) 3086 return ret; 3087 3088 group->owner = owner; 3089 group->owner_cnt++; 3090 return 0; 3091 } 3092 3093 /** 3094 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3095 * @group: The group. 3096 * @owner: Caller specified pointer. Used for exclusive ownership. 3097 * 3098 * This is to support backward compatibility for vfio, which manages DMA 3099 * ownership at the iommu_group level. New invocations of this interface 3100 * should be avoided. Only a single owner may exist for a group. 3101 */ 3102 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3103 { 3104 int ret = 0; 3105 3106 if (WARN_ON(!owner)) 3107 return -EINVAL; 3108 3109 mutex_lock(&group->mutex); 3110 if (group->owner_cnt) { 3111 ret = -EPERM; 3112 goto unlock_out; 3113 } 3114 3115 ret = __iommu_take_dma_ownership(group, owner); 3116 unlock_out: 3117 mutex_unlock(&group->mutex); 3118 3119 return ret; 3120 } 3121 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3122 3123 /** 3124 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3125 * @dev: The device. 3126 * @owner: Caller specified pointer. Used for exclusive ownership.
3127 * 3128 * Claim the DMA ownership of a device. Multiple devices in the same group may 3129 * concurrently claim ownership if they present the same owner value. Returns 0 3130 * on success and an error code on failure. 3131 */ 3132 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3133 { 3134 struct iommu_group *group; 3135 int ret = 0; 3136 3137 if (WARN_ON(!owner)) 3138 return -EINVAL; 3139 3140 group = iommu_group_get(dev); 3141 if (!group) 3142 return -ENODEV; 3143 3144 mutex_lock(&group->mutex); 3145 if (group->owner_cnt) { 3146 if (group->owner != owner) { 3147 ret = -EPERM; 3148 goto unlock_out; 3149 } 3150 group->owner_cnt++; 3151 goto unlock_out; 3152 } 3153 3154 ret = __iommu_take_dma_ownership(group, owner); 3155 unlock_out: 3156 mutex_unlock(&group->mutex); 3157 iommu_group_put(group); 3158 3159 return ret; 3160 } 3161 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3162 3163 static void __iommu_release_dma_ownership(struct iommu_group *group) 3164 { 3165 if (WARN_ON(!group->owner_cnt || !group->owner || 3166 !xa_empty(&group->pasid_array))) 3167 return; 3168 3169 group->owner_cnt = 0; 3170 group->owner = NULL; 3171 __iommu_group_set_domain_nofail(group, group->default_domain); 3172 } 3173 3174 /** 3175 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3176 * @group: The group. 3177 * 3178 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3179 */ 3180 void iommu_group_release_dma_owner(struct iommu_group *group) 3181 { 3182 mutex_lock(&group->mutex); 3183 __iommu_release_dma_ownership(group); 3184 mutex_unlock(&group->mutex); 3185 } 3186 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3187 3188 /** 3189 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3190 * @dev: The device. 3191 * 3192 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3193 */ 3194 void iommu_device_release_dma_owner(struct device *dev) 3195 { 3196 struct iommu_group *group = iommu_group_get(dev); 3197 3198 mutex_lock(&group->mutex); 3199 if (group->owner_cnt > 1) 3200 group->owner_cnt--; 3201 else 3202 __iommu_release_dma_ownership(group); 3203 mutex_unlock(&group->mutex); 3204 iommu_group_put(group); 3205 } 3206 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3207 3208 /** 3209 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3210 * @group: The group. 3211 * 3212 * This provides a status query on a given group. It is racy and only for 3213 * non-binding status reporting.
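 *
 * Example (sketch; the answer may already be stale by the time it is used):
 *
 *	if (iommu_group_dma_owner_claimed(group))
 *		... warn that the group is claimed for user-controlled DMA ...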
3214 */ 3215 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3216 { 3217 unsigned int user; 3218 3219 mutex_lock(&group->mutex); 3220 user = group->owner_cnt; 3221 mutex_unlock(&group->mutex); 3222 3223 return user; 3224 } 3225 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3226 3227 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3228 struct iommu_group *group, ioasid_t pasid) 3229 { 3230 struct group_device *device; 3231 int ret = 0; 3232 3233 for_each_group_device(group, device) { 3234 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3235 if (ret) 3236 break; 3237 } 3238 3239 return ret; 3240 } 3241 3242 static void __iommu_remove_group_pasid(struct iommu_group *group, 3243 ioasid_t pasid) 3244 { 3245 struct group_device *device; 3246 const struct iommu_ops *ops; 3247 3248 for_each_group_device(group, device) { 3249 ops = dev_iommu_ops(device->dev); 3250 ops->remove_dev_pasid(device->dev, pasid); 3251 } 3252 } 3253 3254 /* 3255 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3256 * @domain: the iommu domain. 3257 * @dev: the attached device. 3258 * @pasid: the pasid of the device. 3259 * 3260 * Return: 0 on success, or an error. 3261 */ 3262 int iommu_attach_device_pasid(struct iommu_domain *domain, 3263 struct device *dev, ioasid_t pasid) 3264 { 3265 struct iommu_group *group; 3266 void *curr; 3267 int ret; 3268 3269 if (!domain->ops->set_dev_pasid) 3270 return -EOPNOTSUPP; 3271 3272 group = iommu_group_get(dev); 3273 if (!group) 3274 return -ENODEV; 3275 3276 mutex_lock(&group->mutex); 3277 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3278 if (curr) { 3279 ret = xa_err(curr) ? : -EBUSY; 3280 goto out_unlock; 3281 } 3282 3283 ret = __iommu_set_group_pasid(domain, group, pasid); 3284 if (ret) { 3285 __iommu_remove_group_pasid(group, pasid); 3286 xa_erase(&group->pasid_array, pasid); 3287 } 3288 out_unlock: 3289 mutex_unlock(&group->mutex); 3290 iommu_group_put(group); 3291 3292 return ret; 3293 } 3294 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3295 3296 /* 3297 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3298 * @domain: the iommu domain. 3299 * @dev: the attached device. 3300 * @pasid: the pasid of the device. 3301 * 3302 * The @domain must have been attached to @pasid of the @dev with 3303 * iommu_attach_device_pasid(). 3304 */ 3305 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3306 ioasid_t pasid) 3307 { 3308 struct iommu_group *group = iommu_group_get(dev); 3309 3310 mutex_lock(&group->mutex); 3311 __iommu_remove_group_pasid(group, pasid); 3312 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3313 mutex_unlock(&group->mutex); 3314 3315 iommu_group_put(group); 3316 } 3317 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3318 3319 /* 3320 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3321 * @dev: the queried device 3322 * @pasid: the pasid of the device 3323 * @type: matched domain type, 0 for any match 3324 * 3325 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3326 * domain attached to pasid of a device. Callers must hold a lock around this 3327 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3328 * this type is being manipulated. This API does not internally resolve races 3329 * with attach/detach. 3330 * 3331 * Return: the attached domain on success, an ERR_PTR value if a domain is attached but its type differs from @type, or NULL if no domain is attached.
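 *
 * Example (sketch, under the caller's own attach/detach serialization):
 *
 *	domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
 *	if (IS_ERR_OR_NULL(domain))
 *		... no SVA domain is attached to @pasid, or the type differs ...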
3332 */ 3333 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3334 ioasid_t pasid, 3335 unsigned int type) 3336 { 3337 struct iommu_domain *domain; 3338 struct iommu_group *group; 3339 3340 group = iommu_group_get(dev); 3341 if (!group) 3342 return NULL; 3343 3344 xa_lock(&group->pasid_array); 3345 domain = xa_load(&group->pasid_array, pasid); 3346 if (type && domain && domain->type != type) 3347 domain = ERR_PTR(-EBUSY); 3348 xa_unlock(&group->pasid_array); 3349 iommu_group_put(group); 3350 3351 return domain; 3352 } 3353 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3354 3355 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3356 struct mm_struct *mm) 3357 { 3358 const struct iommu_ops *ops = dev_iommu_ops(dev); 3359 struct iommu_domain *domain; 3360 3361 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3362 if (!domain) 3363 return NULL; 3364 3365 domain->type = IOMMU_DOMAIN_SVA; 3366 mmgrab(mm); 3367 domain->mm = mm; 3368 domain->iopf_handler = iommu_sva_handle_iopf; 3369 domain->fault_data = mm; 3370 3371 return domain; 3372 } 3373
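
/*
 * Example pairing of the SVA helpers above (an illustrative sketch only, not
 * part of this file's API; "pasid" is assumed to have been allocated by the
 * caller):
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_sva_domain_alloc(dev, current->mm);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 *
 *	... device DMA tagged with pasid now translates through current->mm ...
 *
 *	iommu_detach_device_pasid(domain, dev, pasid);
 *	iommu_domain_free(domain);
 */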