1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/amba/bus.h> 10 #include <linux/device.h> 11 #include <linux/kernel.h> 12 #include <linux/bits.h> 13 #include <linux/bug.h> 14 #include <linux/types.h> 15 #include <linux/init.h> 16 #include <linux/export.h> 17 #include <linux/slab.h> 18 #include <linux/errno.h> 19 #include <linux/host1x_context_bus.h> 20 #include <linux/iommu.h> 21 #include <linux/idr.h> 22 #include <linux/err.h> 23 #include <linux/pci.h> 24 #include <linux/pci-ats.h> 25 #include <linux/bitops.h> 26 #include <linux/platform_device.h> 27 #include <linux/property.h> 28 #include <linux/fsl/mc.h> 29 #include <linux/module.h> 30 #include <linux/cc_platform.h> 31 #include <linux/cdx/cdx_bus.h> 32 #include <trace/events/iommu.h> 33 #include <linux/sched/mm.h> 34 #include <linux/msi.h> 35 36 #include "dma-iommu.h" 37 38 #include "iommu-sva.h" 39 40 static struct kset *iommu_group_kset; 41 static DEFINE_IDA(iommu_group_ida); 42 43 static unsigned int iommu_def_domain_type __read_mostly; 44 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); 45 static u32 iommu_cmd_line __read_mostly; 46 47 struct iommu_group { 48 struct kobject kobj; 49 struct kobject *devices_kobj; 50 struct list_head devices; 51 struct xarray pasid_array; 52 struct mutex mutex; 53 void *iommu_data; 54 void (*iommu_data_release)(void *iommu_data); 55 char *name; 56 int id; 57 struct iommu_domain *default_domain; 58 struct iommu_domain *blocking_domain; 59 struct iommu_domain *domain; 60 struct list_head entry; 61 unsigned int owner_cnt; 62 void *owner; 63 }; 64 65 struct group_device { 66 struct list_head list; 67 struct device *dev; 68 char *name; 69 }; 70 71 /* Iterate over each struct group_device in a struct iommu_group */ 72 #define for_each_group_device(group, pos) \ 73 list_for_each_entry(pos, &(group)->devices, list) 74 75 struct iommu_group_attribute { 76 struct attribute attr; 77 ssize_t (*show)(struct iommu_group *group, char *buf); 78 ssize_t (*store)(struct iommu_group *group, 79 const char *buf, size_t count); 80 }; 81 82 static const char * const iommu_group_resv_type_string[] = { 83 [IOMMU_RESV_DIRECT] = "direct", 84 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 85 [IOMMU_RESV_RESERVED] = "reserved", 86 [IOMMU_RESV_MSI] = "msi", 87 [IOMMU_RESV_SW_MSI] = "msi", 88 }; 89 90 #define IOMMU_CMD_LINE_DMA_API BIT(0) 91 #define IOMMU_CMD_LINE_STRICT BIT(1) 92 93 static int iommu_bus_notifier(struct notifier_block *nb, 94 unsigned long action, void *data); 95 static void iommu_release_device(struct device *dev); 96 static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus, 97 unsigned type); 98 static int __iommu_attach_device(struct iommu_domain *domain, 99 struct device *dev); 100 static int __iommu_attach_group(struct iommu_domain *domain, 101 struct iommu_group *group); 102 103 enum { 104 IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0, 105 }; 106 107 static int __iommu_device_set_domain(struct iommu_group *group, 108 struct device *dev, 109 struct iommu_domain *new_domain, 110 unsigned int flags); 111 static int __iommu_group_set_domain_internal(struct iommu_group *group, 112 struct iommu_domain *new_domain, 113 unsigned int flags); 114 static int __iommu_group_set_domain(struct iommu_group *group, 115 struct iommu_domain *new_domain) 116 { 117 return __iommu_group_set_domain_internal(group, 
new_domain, 0); 118 } 119 static void __iommu_group_set_domain_nofail(struct iommu_group *group, 120 struct iommu_domain *new_domain) 121 { 122 WARN_ON(__iommu_group_set_domain_internal( 123 group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED)); 124 } 125 126 static int iommu_setup_default_domain(struct iommu_group *group, 127 int target_type); 128 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 129 struct device *dev); 130 static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 131 static ssize_t iommu_group_store_type(struct iommu_group *group, 132 const char *buf, size_t count); 133 134 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 135 struct iommu_group_attribute iommu_group_attr_##_name = \ 136 __ATTR(_name, _mode, _show, _store) 137 138 #define to_iommu_group_attr(_attr) \ 139 container_of(_attr, struct iommu_group_attribute, attr) 140 #define to_iommu_group(_kobj) \ 141 container_of(_kobj, struct iommu_group, kobj) 142 143 static LIST_HEAD(iommu_device_list); 144 static DEFINE_SPINLOCK(iommu_device_lock); 145 146 static struct bus_type * const iommu_buses[] = { 147 &platform_bus_type, 148 #ifdef CONFIG_PCI 149 &pci_bus_type, 150 #endif 151 #ifdef CONFIG_ARM_AMBA 152 &amba_bustype, 153 #endif 154 #ifdef CONFIG_FSL_MC_BUS 155 &fsl_mc_bus_type, 156 #endif 157 #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS 158 &host1x_context_device_bus_type, 159 #endif 160 #ifdef CONFIG_CDX_BUS 161 &cdx_bus_type, 162 #endif 163 }; 164 165 /* 166 * Use a function instead of an array here because the domain-type is a 167 * bit-field, so an array would waste memory. 168 */ 169 static const char *iommu_domain_type_str(unsigned int t) 170 { 171 switch (t) { 172 case IOMMU_DOMAIN_BLOCKED: 173 return "Blocked"; 174 case IOMMU_DOMAIN_IDENTITY: 175 return "Passthrough"; 176 case IOMMU_DOMAIN_UNMANAGED: 177 return "Unmanaged"; 178 case IOMMU_DOMAIN_DMA: 179 case IOMMU_DOMAIN_DMA_FQ: 180 return "Translated"; 181 default: 182 return "Unknown"; 183 } 184 } 185 186 static int __init iommu_subsys_init(void) 187 { 188 struct notifier_block *nb; 189 190 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 191 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 192 iommu_set_default_passthrough(false); 193 else 194 iommu_set_default_translated(false); 195 196 if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { 197 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 198 iommu_set_default_translated(false); 199 } 200 } 201 202 if (!iommu_default_passthrough() && !iommu_dma_strict) 203 iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; 204 205 pr_info("Default domain type: %s%s\n", 206 iommu_domain_type_str(iommu_def_domain_type), 207 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 208 " (set via kernel command line)" : ""); 209 210 if (!iommu_default_passthrough()) 211 pr_info("DMA domain TLB invalidation policy: %s mode%s\n", 212 iommu_dma_strict ? "strict" : "lazy", 213 (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 
214 " (set via kernel command line)" : ""); 215 216 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 217 if (!nb) 218 return -ENOMEM; 219 220 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 221 nb[i].notifier_call = iommu_bus_notifier; 222 bus_register_notifier(iommu_buses[i], &nb[i]); 223 } 224 225 return 0; 226 } 227 subsys_initcall(iommu_subsys_init); 228 229 static int remove_iommu_group(struct device *dev, void *data) 230 { 231 if (dev->iommu && dev->iommu->iommu_dev == data) 232 iommu_release_device(dev); 233 234 return 0; 235 } 236 237 /** 238 * iommu_device_register() - Register an IOMMU hardware instance 239 * @iommu: IOMMU handle for the instance 240 * @ops: IOMMU ops to associate with the instance 241 * @hwdev: (optional) actual instance device, used for fwnode lookup 242 * 243 * Return: 0 on success, or an error. 244 */ 245 int iommu_device_register(struct iommu_device *iommu, 246 const struct iommu_ops *ops, struct device *hwdev) 247 { 248 int err = 0; 249 250 /* We need to be able to take module references appropriately */ 251 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 252 return -EINVAL; 253 /* 254 * Temporarily enforce global restriction to a single driver. This was 255 * already the de-facto behaviour, since any possible combination of 256 * existing drivers would compete for at least the PCI or platform bus. 257 */ 258 if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) 259 return -EBUSY; 260 261 iommu->ops = ops; 262 if (hwdev) 263 iommu->fwnode = dev_fwnode(hwdev); 264 265 spin_lock(&iommu_device_lock); 266 list_add_tail(&iommu->list, &iommu_device_list); 267 spin_unlock(&iommu_device_lock); 268 269 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { 270 iommu_buses[i]->iommu_ops = ops; 271 err = bus_iommu_probe(iommu_buses[i]); 272 } 273 if (err) 274 iommu_device_unregister(iommu); 275 return err; 276 } 277 EXPORT_SYMBOL_GPL(iommu_device_register); 278 279 void iommu_device_unregister(struct iommu_device *iommu) 280 { 281 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 282 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 283 284 spin_lock(&iommu_device_lock); 285 list_del(&iommu->list); 286 spin_unlock(&iommu_device_lock); 287 } 288 EXPORT_SYMBOL_GPL(iommu_device_unregister); 289 290 static struct dev_iommu *dev_iommu_get(struct device *dev) 291 { 292 struct dev_iommu *param = dev->iommu; 293 294 if (param) 295 return param; 296 297 param = kzalloc(sizeof(*param), GFP_KERNEL); 298 if (!param) 299 return NULL; 300 301 mutex_init(¶m->lock); 302 dev->iommu = param; 303 return param; 304 } 305 306 static void dev_iommu_free(struct device *dev) 307 { 308 struct dev_iommu *param = dev->iommu; 309 310 dev->iommu = NULL; 311 if (param->fwspec) { 312 fwnode_handle_put(param->fwspec->iommu_fwnode); 313 kfree(param->fwspec); 314 } 315 kfree(param); 316 } 317 318 static u32 dev_iommu_get_max_pasids(struct device *dev) 319 { 320 u32 max_pasids = 0, bits = 0; 321 int ret; 322 323 if (dev_is_pci(dev)) { 324 ret = pci_max_pasids(to_pci_dev(dev)); 325 if (ret > 0) 326 max_pasids = ret; 327 } else { 328 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); 329 if (!ret) 330 max_pasids = 1UL << bits; 331 } 332 333 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); 334 } 335 336 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 337 { 338 const struct iommu_ops *ops = dev->bus->iommu_ops; 339 struct iommu_device *iommu_dev; 340 struct iommu_group 
*group; 341 static DEFINE_MUTEX(iommu_probe_device_lock); 342 int ret; 343 344 if (!ops) 345 return -ENODEV; 346 /* 347 * Serialise to avoid races between IOMMU drivers registering in 348 * parallel and/or the "replay" calls from ACPI/OF code via client 349 * driver probe. Once the latter have been cleaned up we should 350 * probably be able to use device_lock() here to minimise the scope, 351 * but for now enforcing a simple global ordering is fine. 352 */ 353 mutex_lock(&iommu_probe_device_lock); 354 if (!dev_iommu_get(dev)) { 355 ret = -ENOMEM; 356 goto err_unlock; 357 } 358 359 if (!try_module_get(ops->owner)) { 360 ret = -EINVAL; 361 goto err_free; 362 } 363 364 iommu_dev = ops->probe_device(dev); 365 if (IS_ERR(iommu_dev)) { 366 ret = PTR_ERR(iommu_dev); 367 goto out_module_put; 368 } 369 370 dev->iommu->iommu_dev = iommu_dev; 371 dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); 372 if (ops->is_attach_deferred) 373 dev->iommu->attach_deferred = ops->is_attach_deferred(dev); 374 375 group = iommu_group_get_for_dev(dev); 376 if (IS_ERR(group)) { 377 ret = PTR_ERR(group); 378 goto out_release; 379 } 380 381 mutex_lock(&group->mutex); 382 if (group_list && !group->default_domain && list_empty(&group->entry)) 383 list_add_tail(&group->entry, group_list); 384 mutex_unlock(&group->mutex); 385 iommu_group_put(group); 386 387 mutex_unlock(&iommu_probe_device_lock); 388 iommu_device_link(iommu_dev, dev); 389 390 return 0; 391 392 out_release: 393 if (ops->release_device) 394 ops->release_device(dev); 395 396 out_module_put: 397 module_put(ops->owner); 398 399 err_free: 400 dev_iommu_free(dev); 401 402 err_unlock: 403 mutex_unlock(&iommu_probe_device_lock); 404 405 return ret; 406 } 407 408 int iommu_probe_device(struct device *dev) 409 { 410 const struct iommu_ops *ops; 411 struct iommu_group *group; 412 int ret; 413 414 ret = __iommu_probe_device(dev, NULL); 415 if (ret) 416 goto err_out; 417 418 group = iommu_group_get(dev); 419 if (!group) { 420 ret = -ENODEV; 421 goto err_release; 422 } 423 424 mutex_lock(&group->mutex); 425 426 if (group->default_domain) 427 iommu_create_device_direct_mappings(group->default_domain, dev); 428 429 if (group->domain) { 430 ret = __iommu_device_set_domain(group, dev, group->domain, 0); 431 if (ret) 432 goto err_unlock; 433 } else if (!group->default_domain) { 434 ret = iommu_setup_default_domain(group, 0); 435 if (ret) 436 goto err_unlock; 437 } 438 439 mutex_unlock(&group->mutex); 440 iommu_group_put(group); 441 442 ops = dev_iommu_ops(dev); 443 if (ops->probe_finalize) 444 ops->probe_finalize(dev); 445 446 return 0; 447 448 err_unlock: 449 mutex_unlock(&group->mutex); 450 iommu_group_put(group); 451 err_release: 452 iommu_release_device(dev); 453 454 err_out: 455 return ret; 456 457 } 458 459 /* 460 * Remove a device from a group's device list and return the group device 461 * if successful. 462 */ 463 static struct group_device * 464 __iommu_group_remove_device(struct iommu_group *group, struct device *dev) 465 { 466 struct group_device *device; 467 468 lockdep_assert_held(&group->mutex); 469 for_each_group_device(group, device) { 470 if (device->dev == dev) { 471 list_del(&device->list); 472 return device; 473 } 474 } 475 476 return NULL; 477 } 478 479 /* 480 * Release a device from its group and decrements the iommu group reference 481 * count. 
 */
static void __iommu_group_release_device(struct iommu_group *group,
					 struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(grp_dev->name);
	kfree(grp_dev);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;

	if (!dev->iommu || !group)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);

	/*
	 * If the group has become empty then ownership must have been released,
	 * and the current domain must be set back to NULL or the default
	 * domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
	 *
	 * The IOMMU driver must set the device to either an identity or
	 * blocking translation and stop using any domain pointer, as it is
	 * going to be freed.
	 */
	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);

	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}
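/*
 * Illustrative sketch (the "example" attribute below is hypothetical, not part
 * of the upstream sysfs ABI): group attributes are declared with
 * IOMMU_GROUP_ATTR() and dispatched through iommu_group_sysfs_ops above, then
 * exposed with iommu_group_create_file(), mirroring the "name",
 * "reserved_regions" and "type" attributes later in this file:
 *
 *	static ssize_t iommu_group_show_example(struct iommu_group *group,
 *						char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", group->id);
 *	}
 *	static IOMMU_GROUP_ATTR(example, 0444, iommu_group_show_example, NULL);
 *
 *	ret = iommu_group_create_file(group, &iommu_group_attr_example);
 */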
614 static void iommu_group_remove_file(struct iommu_group *group, 615 struct iommu_group_attribute *attr) 616 { 617 sysfs_remove_file(&group->kobj, &attr->attr); 618 } 619 620 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 621 { 622 return sysfs_emit(buf, "%s\n", group->name); 623 } 624 625 /** 626 * iommu_insert_resv_region - Insert a new region in the 627 * list of reserved regions. 628 * @new: new region to insert 629 * @regions: list of regions 630 * 631 * Elements are sorted by start address and overlapping segments 632 * of the same type are merged. 633 */ 634 static int iommu_insert_resv_region(struct iommu_resv_region *new, 635 struct list_head *regions) 636 { 637 struct iommu_resv_region *iter, *tmp, *nr, *top; 638 LIST_HEAD(stack); 639 640 nr = iommu_alloc_resv_region(new->start, new->length, 641 new->prot, new->type, GFP_KERNEL); 642 if (!nr) 643 return -ENOMEM; 644 645 /* First add the new element based on start address sorting */ 646 list_for_each_entry(iter, regions, list) { 647 if (nr->start < iter->start || 648 (nr->start == iter->start && nr->type <= iter->type)) 649 break; 650 } 651 list_add_tail(&nr->list, &iter->list); 652 653 /* Merge overlapping segments of type nr->type in @regions, if any */ 654 list_for_each_entry_safe(iter, tmp, regions, list) { 655 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 656 657 /* no merge needed on elements of different types than @new */ 658 if (iter->type != new->type) { 659 list_move_tail(&iter->list, &stack); 660 continue; 661 } 662 663 /* look for the last stack element of same type as @iter */ 664 list_for_each_entry_reverse(top, &stack, list) 665 if (top->type == iter->type) 666 goto check_overlap; 667 668 list_move_tail(&iter->list, &stack); 669 continue; 670 671 check_overlap: 672 top_end = top->start + top->length - 1; 673 674 if (iter->start > top_end + 1) { 675 list_move_tail(&iter->list, &stack); 676 } else { 677 top->length = max(top_end, iter_end) - top->start + 1; 678 list_del(&iter->list); 679 kfree(iter); 680 } 681 } 682 list_splice(&stack, regions); 683 return 0; 684 } 685 686 static int 687 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 688 struct list_head *group_resv_regions) 689 { 690 struct iommu_resv_region *entry; 691 int ret = 0; 692 693 list_for_each_entry(entry, dev_resv_regions, list) { 694 ret = iommu_insert_resv_region(entry, group_resv_regions); 695 if (ret) 696 break; 697 } 698 return ret; 699 } 700 701 int iommu_get_group_resv_regions(struct iommu_group *group, 702 struct list_head *head) 703 { 704 struct group_device *device; 705 int ret = 0; 706 707 mutex_lock(&group->mutex); 708 for_each_group_device(group, device) { 709 struct list_head dev_resv_regions; 710 711 /* 712 * Non-API groups still expose reserved_regions in sysfs, 713 * so filter out calls that get here that way. 
714 */ 715 if (!device->dev->iommu) 716 break; 717 718 INIT_LIST_HEAD(&dev_resv_regions); 719 iommu_get_resv_regions(device->dev, &dev_resv_regions); 720 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 721 iommu_put_resv_regions(device->dev, &dev_resv_regions); 722 if (ret) 723 break; 724 } 725 mutex_unlock(&group->mutex); 726 return ret; 727 } 728 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 729 730 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 731 char *buf) 732 { 733 struct iommu_resv_region *region, *next; 734 struct list_head group_resv_regions; 735 int offset = 0; 736 737 INIT_LIST_HEAD(&group_resv_regions); 738 iommu_get_group_resv_regions(group, &group_resv_regions); 739 740 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 741 offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n", 742 (long long)region->start, 743 (long long)(region->start + 744 region->length - 1), 745 iommu_group_resv_type_string[region->type]); 746 kfree(region); 747 } 748 749 return offset; 750 } 751 752 static ssize_t iommu_group_show_type(struct iommu_group *group, 753 char *buf) 754 { 755 char *type = "unknown"; 756 757 mutex_lock(&group->mutex); 758 if (group->default_domain) { 759 switch (group->default_domain->type) { 760 case IOMMU_DOMAIN_BLOCKED: 761 type = "blocked"; 762 break; 763 case IOMMU_DOMAIN_IDENTITY: 764 type = "identity"; 765 break; 766 case IOMMU_DOMAIN_UNMANAGED: 767 type = "unmanaged"; 768 break; 769 case IOMMU_DOMAIN_DMA: 770 type = "DMA"; 771 break; 772 case IOMMU_DOMAIN_DMA_FQ: 773 type = "DMA-FQ"; 774 break; 775 } 776 } 777 mutex_unlock(&group->mutex); 778 779 return sysfs_emit(buf, "%s\n", type); 780 } 781 782 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 783 784 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 785 iommu_group_show_resv_regions, NULL); 786 787 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 788 iommu_group_store_type); 789 790 static void iommu_group_release(struct kobject *kobj) 791 { 792 struct iommu_group *group = to_iommu_group(kobj); 793 794 pr_debug("Releasing group %d\n", group->id); 795 796 if (group->iommu_data_release) 797 group->iommu_data_release(group->iommu_data); 798 799 ida_free(&iommu_group_ida, group->id); 800 801 if (group->default_domain) 802 iommu_domain_free(group->default_domain); 803 if (group->blocking_domain) 804 iommu_domain_free(group->blocking_domain); 805 806 kfree(group->name); 807 kfree(group); 808 } 809 810 static const struct kobj_type iommu_group_ktype = { 811 .sysfs_ops = &iommu_group_sysfs_ops, 812 .release = iommu_group_release, 813 }; 814 815 /** 816 * iommu_group_alloc - Allocate a new group 817 * 818 * This function is called by an iommu driver to allocate a new iommu 819 * group. The iommu group represents the minimum granularity of the iommu. 820 * Upon successful return, the caller holds a reference to the supplied 821 * group in order to hold the group until devices are added. Use 822 * iommu_group_put() to release this extra reference count, allowing the 823 * group to be automatically reclaimed once it has no devices or external 824 * references. 
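 *
 * Example (illustrative sketch): a driver's ->device_group() callback that
 * shares one group per parent container and falls back to allocating a new
 * group, much like fsl_mc_device_group() later in this file; the use of
 * dev->parent as the grouping key is a hypothetical simplification:
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_get(dev->parent);
 *
 *		if (!group)
 *			group = iommu_group_alloc();
 *		return group;
 *	}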
825 */ 826 struct iommu_group *iommu_group_alloc(void) 827 { 828 struct iommu_group *group; 829 int ret; 830 831 group = kzalloc(sizeof(*group), GFP_KERNEL); 832 if (!group) 833 return ERR_PTR(-ENOMEM); 834 835 group->kobj.kset = iommu_group_kset; 836 mutex_init(&group->mutex); 837 INIT_LIST_HEAD(&group->devices); 838 INIT_LIST_HEAD(&group->entry); 839 xa_init(&group->pasid_array); 840 841 ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); 842 if (ret < 0) { 843 kfree(group); 844 return ERR_PTR(ret); 845 } 846 group->id = ret; 847 848 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 849 NULL, "%d", group->id); 850 if (ret) { 851 kobject_put(&group->kobj); 852 return ERR_PTR(ret); 853 } 854 855 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 856 if (!group->devices_kobj) { 857 kobject_put(&group->kobj); /* triggers .release & free */ 858 return ERR_PTR(-ENOMEM); 859 } 860 861 /* 862 * The devices_kobj holds a reference on the group kobject, so 863 * as long as that exists so will the group. We can therefore 864 * use the devices_kobj for reference counting. 865 */ 866 kobject_put(&group->kobj); 867 868 ret = iommu_group_create_file(group, 869 &iommu_group_attr_reserved_regions); 870 if (ret) { 871 kobject_put(group->devices_kobj); 872 return ERR_PTR(ret); 873 } 874 875 ret = iommu_group_create_file(group, &iommu_group_attr_type); 876 if (ret) { 877 kobject_put(group->devices_kobj); 878 return ERR_PTR(ret); 879 } 880 881 pr_debug("Allocated group %d\n", group->id); 882 883 return group; 884 } 885 EXPORT_SYMBOL_GPL(iommu_group_alloc); 886 887 /** 888 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 889 * @group: the group 890 * 891 * iommu drivers can store data in the group for use when doing iommu 892 * operations. This function provides a way to retrieve it. Caller 893 * should hold a group reference. 894 */ 895 void *iommu_group_get_iommudata(struct iommu_group *group) 896 { 897 return group->iommu_data; 898 } 899 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 900 901 /** 902 * iommu_group_set_iommudata - set iommu_data for a group 903 * @group: the group 904 * @iommu_data: new data 905 * @release: release function for iommu_data 906 * 907 * iommu drivers can store data in the group for use when doing iommu 908 * operations. This function provides a way to set the data after 909 * the group has been allocated. Caller should hold a group reference. 910 */ 911 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 912 void (*release)(void *iommu_data)) 913 { 914 group->iommu_data = iommu_data; 915 group->iommu_data_release = release; 916 } 917 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 918 919 /** 920 * iommu_group_set_name - set name for a group 921 * @group: the group 922 * @name: name 923 * 924 * Allow iommu driver to set a name for a group. When set it will 925 * appear in a name attribute file under the group in sysfs. 
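 *
 * Example (sketch, hypothetical driver code): name a freshly allocated group
 * so it can be identified under /sys/kernel/iommu_groups/<id>/name:
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return group;
 *	ret = iommu_group_set_name(group, "my-iommu-unit");
 *	if (ret) {
 *		iommu_group_put(group);
 *		return ERR_PTR(ret);
 *	}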
926 */ 927 int iommu_group_set_name(struct iommu_group *group, const char *name) 928 { 929 int ret; 930 931 if (group->name) { 932 iommu_group_remove_file(group, &iommu_group_attr_name); 933 kfree(group->name); 934 group->name = NULL; 935 if (!name) 936 return 0; 937 } 938 939 group->name = kstrdup(name, GFP_KERNEL); 940 if (!group->name) 941 return -ENOMEM; 942 943 ret = iommu_group_create_file(group, &iommu_group_attr_name); 944 if (ret) { 945 kfree(group->name); 946 group->name = NULL; 947 return ret; 948 } 949 950 return 0; 951 } 952 EXPORT_SYMBOL_GPL(iommu_group_set_name); 953 954 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, 955 struct device *dev) 956 { 957 struct iommu_resv_region *entry; 958 struct list_head mappings; 959 unsigned long pg_size; 960 int ret = 0; 961 962 if (!iommu_is_dma_domain(domain)) 963 return 0; 964 965 BUG_ON(!domain->pgsize_bitmap); 966 967 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 968 INIT_LIST_HEAD(&mappings); 969 970 iommu_get_resv_regions(dev, &mappings); 971 972 /* We need to consider overlapping regions for different devices */ 973 list_for_each_entry(entry, &mappings, list) { 974 dma_addr_t start, end, addr; 975 size_t map_size = 0; 976 977 start = ALIGN(entry->start, pg_size); 978 end = ALIGN(entry->start + entry->length, pg_size); 979 980 if (entry->type != IOMMU_RESV_DIRECT && 981 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 982 continue; 983 984 for (addr = start; addr <= end; addr += pg_size) { 985 phys_addr_t phys_addr; 986 987 if (addr == end) 988 goto map_end; 989 990 phys_addr = iommu_iova_to_phys(domain, addr); 991 if (!phys_addr) { 992 map_size += pg_size; 993 continue; 994 } 995 996 map_end: 997 if (map_size) { 998 ret = iommu_map(domain, addr - map_size, 999 addr - map_size, map_size, 1000 entry->prot, GFP_KERNEL); 1001 if (ret) 1002 goto out; 1003 map_size = 0; 1004 } 1005 } 1006 1007 } 1008 1009 iommu_flush_iotlb_all(domain); 1010 1011 out: 1012 iommu_put_resv_regions(dev, &mappings); 1013 1014 return ret; 1015 } 1016 1017 /** 1018 * iommu_group_add_device - add a device to an iommu group 1019 * @group: the group into which to add the device (reference should be held) 1020 * @dev: the device 1021 * 1022 * This function is called by an iommu driver to add a device into a 1023 * group. Adding a device increments the group reference count. 1024 */ 1025 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 1026 { 1027 int ret, i = 0; 1028 struct group_device *device; 1029 1030 device = kzalloc(sizeof(*device), GFP_KERNEL); 1031 if (!device) 1032 return -ENOMEM; 1033 1034 device->dev = dev; 1035 1036 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 1037 if (ret) 1038 goto err_free_device; 1039 1040 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 1041 rename: 1042 if (!device->name) { 1043 ret = -ENOMEM; 1044 goto err_remove_link; 1045 } 1046 1047 ret = sysfs_create_link_nowarn(group->devices_kobj, 1048 &dev->kobj, device->name); 1049 if (ret) { 1050 if (ret == -EEXIST && i >= 0) { 1051 /* 1052 * Account for the slim chance of collision 1053 * and append an instance to the name. 
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);
	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
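 *
 * Example (sketch): keep the group alive while it sits on a caller-private
 * list; @entry and @my_list are hypothetical:
 *
 *	entry->group = iommu_group_ref_get(group);
 *	list_add(&entry->node, &my_list);
 *	...
 *	iommu_group_put(entry->group);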
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
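 *
 * Example (illustrative sketch of the register/unregister pairing; the
 * handler below and its use of @data are hypothetical, not taken from an
 * existing driver):
 *
 *	static int my_iopf_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp = {
 *			.version = IOMMU_PAGE_RESP_VERSION_1,
 *			.code	 = IOMMU_PAGE_RESP_INVALID,
 *		};
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ ||
 *		    !(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
 *			return -EOPNOTSUPP;
 *
 *		resp.grpid = fault->prm.grpid;
 *		if (fault->prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID) {
 *			resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
 *			resp.pasid = fault->prm.pasid;
 *		}
 *		iommu_page_response(dev, &resp);
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_iopf_handler, dev);
 *	...
 *	ret = iommu_unregister_device_fault_handler(dev);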
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is
required, the corresponding request is 1374 * matched using the group ID, the PASID valid bit and the PASID 1375 * value. Otherwise only the group ID matches request and 1376 * response. 1377 */ 1378 needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; 1379 if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) 1380 continue; 1381 1382 if (!needs_pasid && has_pasid) { 1383 /* No big deal, just clear it. */ 1384 msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; 1385 msg->pasid = 0; 1386 } 1387 1388 ret = ops->page_response(dev, evt, msg); 1389 list_del(&evt->list); 1390 kfree(evt); 1391 break; 1392 } 1393 1394 done_unlock: 1395 mutex_unlock(¶m->fault_param->lock); 1396 return ret; 1397 } 1398 EXPORT_SYMBOL_GPL(iommu_page_response); 1399 1400 /** 1401 * iommu_group_id - Return ID for a group 1402 * @group: the group to ID 1403 * 1404 * Return the unique ID for the group matching the sysfs group number. 1405 */ 1406 int iommu_group_id(struct iommu_group *group) 1407 { 1408 return group->id; 1409 } 1410 EXPORT_SYMBOL_GPL(iommu_group_id); 1411 1412 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1413 unsigned long *devfns); 1414 1415 /* 1416 * To consider a PCI device isolated, we require ACS to support Source 1417 * Validation, Request Redirection, Completer Redirection, and Upstream 1418 * Forwarding. This effectively means that devices cannot spoof their 1419 * requester ID, requests and completions cannot be redirected, and all 1420 * transactions are forwarded upstream, even as it passes through a 1421 * bridge where the target device is downstream. 1422 */ 1423 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1424 1425 /* 1426 * For multifunction devices which are not isolated from each other, find 1427 * all the other non-isolated functions and look for existing groups. For 1428 * each function, we also need to look for aliases to or from other devices 1429 * that may already have a group. 1430 */ 1431 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1432 unsigned long *devfns) 1433 { 1434 struct pci_dev *tmp = NULL; 1435 struct iommu_group *group; 1436 1437 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1438 return NULL; 1439 1440 for_each_pci_dev(tmp) { 1441 if (tmp == pdev || tmp->bus != pdev->bus || 1442 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1443 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1444 continue; 1445 1446 group = get_pci_alias_group(tmp, devfns); 1447 if (group) { 1448 pci_dev_put(tmp); 1449 return group; 1450 } 1451 } 1452 1453 return NULL; 1454 } 1455 1456 /* 1457 * Look for aliases to or from the given device for existing groups. DMA 1458 * aliases are only supported on the same bus, therefore the search 1459 * space is quite small (especially since we're really only looking at pcie 1460 * device, and therefore only expect multiple slots on the root complex or 1461 * downstream switch ports). It's conceivable though that a pair of 1462 * multifunction devices could have aliases between them that would cause a 1463 * loop. To prevent this, we use a bitmap to track where we've been. 
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
1588 */ 1589 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1590 if (group) 1591 return group; 1592 1593 /* No shared group found, allocate new */ 1594 return iommu_group_alloc(); 1595 } 1596 EXPORT_SYMBOL_GPL(pci_device_group); 1597 1598 /* Get the IOMMU group for device on fsl-mc bus */ 1599 struct iommu_group *fsl_mc_device_group(struct device *dev) 1600 { 1601 struct device *cont_dev = fsl_mc_cont_dev(dev); 1602 struct iommu_group *group; 1603 1604 group = iommu_group_get(cont_dev); 1605 if (!group) 1606 group = iommu_group_alloc(); 1607 return group; 1608 } 1609 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1610 1611 static int iommu_get_def_domain_type(struct device *dev) 1612 { 1613 const struct iommu_ops *ops = dev_iommu_ops(dev); 1614 1615 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 1616 return IOMMU_DOMAIN_DMA; 1617 1618 if (ops->def_domain_type) 1619 return ops->def_domain_type(dev); 1620 1621 return 0; 1622 } 1623 1624 static struct iommu_domain * 1625 __iommu_group_alloc_default_domain(const struct bus_type *bus, 1626 struct iommu_group *group, int req_type) 1627 { 1628 if (group->default_domain && group->default_domain->type == req_type) 1629 return group->default_domain; 1630 return __iommu_domain_alloc(bus, req_type); 1631 } 1632 1633 /* 1634 * req_type of 0 means "auto" which means to select a domain based on 1635 * iommu_def_domain_type or what the driver actually supports. 1636 */ 1637 static struct iommu_domain * 1638 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type) 1639 { 1640 const struct bus_type *bus = 1641 list_first_entry(&group->devices, struct group_device, list) 1642 ->dev->bus; 1643 struct iommu_domain *dom; 1644 1645 lockdep_assert_held(&group->mutex); 1646 1647 if (req_type) 1648 return __iommu_group_alloc_default_domain(bus, group, req_type); 1649 1650 /* The driver gave no guidance on what type to use, try the default */ 1651 dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type); 1652 if (dom) 1653 return dom; 1654 1655 /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ 1656 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) 1657 return NULL; 1658 dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA); 1659 if (!dom) 1660 return NULL; 1661 1662 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1663 iommu_def_domain_type, group->name); 1664 return dom; 1665 } 1666 1667 /** 1668 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1669 * @dev: target device 1670 * 1671 * This function is intended to be called by IOMMU drivers and extended to 1672 * support common, bus-defined algorithms when determining or creating the 1673 * IOMMU group for a device. On success, the caller will hold a reference 1674 * to the returned IOMMU group, which will already include the provided 1675 * device. The reference should be released with iommu_group_put(). 
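 *
 * Example (sketch): the ->device_group() callback consulted here usually just
 * dispatches to one of the generic helpers; my_iommu_device_group() is a
 * hypothetical name:
 *
 *	static struct iommu_group *my_iommu_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}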
1676 */ 1677 static struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1678 { 1679 const struct iommu_ops *ops = dev_iommu_ops(dev); 1680 struct iommu_group *group; 1681 int ret; 1682 1683 group = iommu_group_get(dev); 1684 if (group) 1685 return group; 1686 1687 group = ops->device_group(dev); 1688 if (WARN_ON_ONCE(group == NULL)) 1689 return ERR_PTR(-EINVAL); 1690 1691 if (IS_ERR(group)) 1692 return group; 1693 1694 ret = iommu_group_add_device(group, dev); 1695 if (ret) 1696 goto out_put_group; 1697 1698 return group; 1699 1700 out_put_group: 1701 iommu_group_put(group); 1702 1703 return ERR_PTR(ret); 1704 } 1705 1706 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1707 { 1708 return group->default_domain; 1709 } 1710 1711 static int probe_iommu_group(struct device *dev, void *data) 1712 { 1713 struct list_head *group_list = data; 1714 struct iommu_group *group; 1715 int ret; 1716 1717 /* Device is probed already if in a group */ 1718 group = iommu_group_get(dev); 1719 if (group) { 1720 iommu_group_put(group); 1721 return 0; 1722 } 1723 1724 ret = __iommu_probe_device(dev, group_list); 1725 if (ret == -ENODEV) 1726 ret = 0; 1727 1728 return ret; 1729 } 1730 1731 static int iommu_bus_notifier(struct notifier_block *nb, 1732 unsigned long action, void *data) 1733 { 1734 struct device *dev = data; 1735 1736 if (action == BUS_NOTIFY_ADD_DEVICE) { 1737 int ret; 1738 1739 ret = iommu_probe_device(dev); 1740 return (ret) ? NOTIFY_DONE : NOTIFY_OK; 1741 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { 1742 iommu_release_device(dev); 1743 return NOTIFY_OK; 1744 } 1745 1746 return 0; 1747 } 1748 1749 /* A target_type of 0 will select the best domain type and cannot fail */ 1750 static int iommu_get_default_domain_type(struct iommu_group *group, 1751 int target_type) 1752 { 1753 int best_type = target_type; 1754 struct group_device *gdev; 1755 struct device *last_dev; 1756 1757 lockdep_assert_held(&group->mutex); 1758 1759 for_each_group_device(group, gdev) { 1760 unsigned int type = iommu_get_def_domain_type(gdev->dev); 1761 1762 if (best_type && type && best_type != type) { 1763 if (target_type) { 1764 dev_err_ratelimited( 1765 gdev->dev, 1766 "Device cannot be in %s domain\n", 1767 iommu_domain_type_str(target_type)); 1768 return -1; 1769 } 1770 1771 dev_warn( 1772 gdev->dev, 1773 "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n", 1774 iommu_domain_type_str(type), dev_name(last_dev), 1775 iommu_domain_type_str(best_type)); 1776 return 0; 1777 } 1778 if (!best_type) 1779 best_type = type; 1780 last_dev = gdev->dev; 1781 } 1782 return best_type; 1783 } 1784 1785 static void iommu_group_do_probe_finalize(struct device *dev) 1786 { 1787 const struct iommu_ops *ops = dev_iommu_ops(dev); 1788 1789 if (ops->probe_finalize) 1790 ops->probe_finalize(dev); 1791 } 1792 1793 int bus_iommu_probe(const struct bus_type *bus) 1794 { 1795 struct iommu_group *group, *next; 1796 LIST_HEAD(group_list); 1797 int ret; 1798 1799 /* 1800 * This code-path does not allocate the default domain when 1801 * creating the iommu group, so do it after the groups are 1802 * created. 
1803 */ 1804 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1805 if (ret) 1806 return ret; 1807 1808 list_for_each_entry_safe(group, next, &group_list, entry) { 1809 struct group_device *gdev; 1810 1811 mutex_lock(&group->mutex); 1812 1813 /* Remove item from the list */ 1814 list_del_init(&group->entry); 1815 1816 ret = iommu_setup_default_domain(group, 0); 1817 if (ret) { 1818 mutex_unlock(&group->mutex); 1819 return ret; 1820 } 1821 mutex_unlock(&group->mutex); 1822 1823 /* 1824 * FIXME: Mis-locked because the ops->probe_finalize() call-back 1825 * of some IOMMU drivers calls arm_iommu_attach_device() which 1826 * in-turn might call back into IOMMU core code, where it tries 1827 * to take group->mutex, resulting in a deadlock. 1828 */ 1829 for_each_group_device(group, gdev) 1830 iommu_group_do_probe_finalize(gdev->dev); 1831 } 1832 1833 return 0; 1834 } 1835 1836 bool iommu_present(const struct bus_type *bus) 1837 { 1838 return bus->iommu_ops != NULL; 1839 } 1840 EXPORT_SYMBOL_GPL(iommu_present); 1841 1842 /** 1843 * device_iommu_capable() - check for a general IOMMU capability 1844 * @dev: device to which the capability would be relevant, if available 1845 * @cap: IOMMU capability 1846 * 1847 * Return: true if an IOMMU is present and supports the given capability 1848 * for the given device, otherwise false. 1849 */ 1850 bool device_iommu_capable(struct device *dev, enum iommu_cap cap) 1851 { 1852 const struct iommu_ops *ops; 1853 1854 if (!dev->iommu || !dev->iommu->iommu_dev) 1855 return false; 1856 1857 ops = dev_iommu_ops(dev); 1858 if (!ops->capable) 1859 return false; 1860 1861 return ops->capable(dev, cap); 1862 } 1863 EXPORT_SYMBOL_GPL(device_iommu_capable); 1864 1865 /** 1866 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() 1867 * for a group 1868 * @group: Group to query 1869 * 1870 * IOMMU groups should not have differing values of 1871 * msi_device_has_isolated_msi() for devices in a group. However nothing 1872 * directly prevents this, so ensure mistakes don't result in isolation failures 1873 * by checking that all the devices are the same. 1874 */ 1875 bool iommu_group_has_isolated_msi(struct iommu_group *group) 1876 { 1877 struct group_device *group_dev; 1878 bool ret = true; 1879 1880 mutex_lock(&group->mutex); 1881 for_each_group_device(group, group_dev) 1882 ret &= msi_device_has_isolated_msi(group_dev->dev); 1883 mutex_unlock(&group->mutex); 1884 return ret; 1885 } 1886 EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); 1887 1888 /** 1889 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1890 * @domain: iommu domain 1891 * @handler: fault handler 1892 * @token: user data, will be passed back to the fault handler 1893 * 1894 * This function should be used by IOMMU users which want to be notified 1895 * whenever an IOMMU fault happens. 1896 * 1897 * The fault handler itself should return 0 on success, and an appropriate 1898 * error code otherwise. 
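 *
 * Example (illustrative sketch; the handler body and its return value policy
 * are hypothetical):
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "unexpected fault at IOVA %#lx, flags %#x\n",
 *			iova, flags);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);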
1899 */ 1900 void iommu_set_fault_handler(struct iommu_domain *domain, 1901 iommu_fault_handler_t handler, 1902 void *token) 1903 { 1904 BUG_ON(!domain); 1905 1906 domain->handler = handler; 1907 domain->handler_token = token; 1908 } 1909 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1910 1911 static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus, 1912 unsigned type) 1913 { 1914 struct iommu_domain *domain; 1915 unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS; 1916 1917 if (bus == NULL || bus->iommu_ops == NULL) 1918 return NULL; 1919 1920 domain = bus->iommu_ops->domain_alloc(alloc_type); 1921 if (!domain) 1922 return NULL; 1923 1924 domain->type = type; 1925 /* 1926 * If not already set, assume all sizes by default; the driver 1927 * may override this later 1928 */ 1929 if (!domain->pgsize_bitmap) 1930 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 1931 1932 if (!domain->ops) 1933 domain->ops = bus->iommu_ops->default_domain_ops; 1934 1935 if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { 1936 iommu_domain_free(domain); 1937 domain = NULL; 1938 } 1939 return domain; 1940 } 1941 1942 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) 1943 { 1944 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 1945 } 1946 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 1947 1948 void iommu_domain_free(struct iommu_domain *domain) 1949 { 1950 if (domain->type == IOMMU_DOMAIN_SVA) 1951 mmdrop(domain->mm); 1952 iommu_put_dma_cookie(domain); 1953 domain->ops->free(domain); 1954 } 1955 EXPORT_SYMBOL_GPL(iommu_domain_free); 1956 1957 /* 1958 * Put the group's domain back to the appropriate core-owned domain - either the 1959 * standard kernel-mode DMA configuration or an all-DMA-blocked domain. 1960 */ 1961 static void __iommu_group_set_core_domain(struct iommu_group *group) 1962 { 1963 struct iommu_domain *new_domain; 1964 1965 if (group->owner) 1966 new_domain = group->blocking_domain; 1967 else 1968 new_domain = group->default_domain; 1969 1970 __iommu_group_set_domain_nofail(group, new_domain); 1971 } 1972 1973 static int __iommu_attach_device(struct iommu_domain *domain, 1974 struct device *dev) 1975 { 1976 int ret; 1977 1978 if (unlikely(domain->ops->attach_dev == NULL)) 1979 return -ENODEV; 1980 1981 ret = domain->ops->attach_dev(domain, dev); 1982 if (ret) 1983 return ret; 1984 dev->iommu->attach_deferred = 0; 1985 trace_attach_device_to_domain(dev); 1986 return 0; 1987 } 1988 1989 /** 1990 * iommu_attach_device - Attach an IOMMU domain to a device 1991 * @domain: IOMMU domain to attach 1992 * @dev: Device that will be attached 1993 * 1994 * Returns 0 on success and error code on failure 1995 * 1996 * Note that EINVAL can be treated as a soft failure, indicating 1997 * that certain configuration of the domain is incompatible with 1998 * the device. In this case attaching a different domain to the 1999 * device may succeed. 
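 *
 * Example (sketch): a driver that owns its device's translation end to end;
 * @iova, @paddr and @size are hypothetical values chosen by the caller:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto err_free;
 *
 *	ret = iommu_map(domain, iova, paddr, size,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	...
 *	iommu_detach_device(domain, dev);
 * err_free:
 *	iommu_domain_free(domain);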
2000 */ 2001 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2002 { 2003 struct iommu_group *group; 2004 int ret; 2005 2006 group = iommu_group_get(dev); 2007 if (!group) 2008 return -ENODEV; 2009 2010 /* 2011 * Lock the group to make sure the device-count doesn't 2012 * change while we are attaching 2013 */ 2014 mutex_lock(&group->mutex); 2015 ret = -EINVAL; 2016 if (list_count_nodes(&group->devices) != 1) 2017 goto out_unlock; 2018 2019 ret = __iommu_attach_group(domain, group); 2020 2021 out_unlock: 2022 mutex_unlock(&group->mutex); 2023 iommu_group_put(group); 2024 2025 return ret; 2026 } 2027 EXPORT_SYMBOL_GPL(iommu_attach_device); 2028 2029 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2030 { 2031 if (dev->iommu && dev->iommu->attach_deferred) 2032 return __iommu_attach_device(domain, dev); 2033 2034 return 0; 2035 } 2036 2037 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2038 { 2039 struct iommu_group *group; 2040 2041 group = iommu_group_get(dev); 2042 if (!group) 2043 return; 2044 2045 mutex_lock(&group->mutex); 2046 if (WARN_ON(domain != group->domain) || 2047 WARN_ON(list_count_nodes(&group->devices) != 1)) 2048 goto out_unlock; 2049 __iommu_group_set_core_domain(group); 2050 2051 out_unlock: 2052 mutex_unlock(&group->mutex); 2053 iommu_group_put(group); 2054 } 2055 EXPORT_SYMBOL_GPL(iommu_detach_device); 2056 2057 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2058 { 2059 struct iommu_domain *domain; 2060 struct iommu_group *group; 2061 2062 group = iommu_group_get(dev); 2063 if (!group) 2064 return NULL; 2065 2066 domain = group->domain; 2067 2068 iommu_group_put(group); 2069 2070 return domain; 2071 } 2072 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2073 2074 /* 2075 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2076 * guarantees that the group and its default domain are valid and correct. 2077 */ 2078 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2079 { 2080 return dev->iommu_group->default_domain; 2081 } 2082 2083 static int __iommu_attach_group(struct iommu_domain *domain, 2084 struct iommu_group *group) 2085 { 2086 if (group->domain && group->domain != group->default_domain && 2087 group->domain != group->blocking_domain) 2088 return -EBUSY; 2089 2090 return __iommu_group_set_domain(group, domain); 2091 } 2092 2093 /** 2094 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2095 * @domain: IOMMU domain to attach 2096 * @group: IOMMU group that will be attached 2097 * 2098 * Returns 0 on success and error code on failure 2099 * 2100 * Note that EINVAL can be treated as a soft failure, indicating 2101 * that certain configuration of the domain is incompatible with 2102 * the group. In this case attaching a different domain to the 2103 * group may succeed. 
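 *
 * Group-level users (e.g. VFIO-style drivers) follow the same pattern as
 * iommu_attach_device(), but operate on the whole group (sketch):
 *
 *	if (iommu_attach_group(domain, group))
 *		goto err;
 *	(... DMA from every device in "group" is now translated by "domain" ...)
 *	iommu_detach_group(domain, group);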
2104  */
2105 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2106 {
2107 	int ret;
2108 
2109 	mutex_lock(&group->mutex);
2110 	ret = __iommu_attach_group(domain, group);
2111 	mutex_unlock(&group->mutex);
2112 
2113 	return ret;
2114 }
2115 EXPORT_SYMBOL_GPL(iommu_attach_group);
2116 
2117 static int __iommu_device_set_domain(struct iommu_group *group,
2118 				     struct device *dev,
2119 				     struct iommu_domain *new_domain,
2120 				     unsigned int flags)
2121 {
2122 	int ret;
2123 
2124 	if (dev->iommu->attach_deferred) {
2125 		if (new_domain == group->default_domain)
2126 			return 0;
2127 		dev->iommu->attach_deferred = 0;
2128 	}
2129 
2130 	ret = __iommu_attach_device(new_domain, dev);
2131 	if (ret) {
2132 		/*
2133 		 * If we have a blocking domain then try to attach that in hopes
2134 		 * of avoiding a UAF. Modern drivers should implement blocking
2135 		 * domains as global statics that cannot fail.
2136 		 */
2137 		if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
2138 		    group->blocking_domain &&
2139 		    group->blocking_domain != new_domain)
2140 			__iommu_attach_device(group->blocking_domain, dev);
2141 		return ret;
2142 	}
2143 	return 0;
2144 }
2145 
2146 /*
2147  * If 0 is returned the group's domain is new_domain. If an error is returned
2148  * then the group's domain will be set back to the existing domain unless
2149  * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
2150  * domain is left inconsistent. It is a driver bug to fail attach with a
2151  * previously good domain. We try to avoid a kernel UAF because of this.
2152  *
2153  * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
2154  * API works on domains and devices. Bridge that gap by iterating over the
2155  * devices in a group. Ideally we'd have a single device which represents the
2156  * requestor ID of the group, but we also allow IOMMU drivers to create policy
2157  * defined minimum sets, where the physical hardware may be able to distinguish
2158  * members, but we wish to group them at a higher level (ex. untrusted
2159  * multi-function PCI devices). Thus we attach each device.
2160  */
2161 static int __iommu_group_set_domain_internal(struct iommu_group *group,
2162 					     struct iommu_domain *new_domain,
2163 					     unsigned int flags)
2164 {
2165 	struct group_device *last_gdev;
2166 	struct group_device *gdev;
2167 	int result;
2168 	int ret;
2169 
2170 	lockdep_assert_held(&group->mutex);
2171 
2172 	if (group->domain == new_domain)
2173 		return 0;
2174 
2175 	/*
2176 	 * New drivers should support default domains, so the set_platform_dma_ops()
2177 	 * op will never be called. Otherwise the NULL domain represents some
2178 	 * platform specific behavior.
2179 	 */
2180 	if (!new_domain) {
2181 		for_each_group_device(group, gdev) {
2182 			const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
2183 
2184 			if (!WARN_ON(!ops->set_platform_dma_ops))
2185 				ops->set_platform_dma_ops(gdev->dev);
2186 		}
2187 		group->domain = NULL;
2188 		return 0;
2189 	}
2190 
2191 	/*
2192 	 * Changing the domain is done by calling attach_dev() on the new
2193 	 * domain. This switch does not have to be atomic and DMA can be
2194 	 * discarded during the transition. DMA must only be able to access
2195 	 * either new_domain or group->domain, never something else.
2196 	 */
2197 	result = 0;
2198 	for_each_group_device(group, gdev) {
2199 		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
2200 						flags);
2201 		if (ret) {
2202 			result = ret;
2203 			/*
2204 			 * Keep trying the other devices in the group.
If a 2205 * driver fails attach to an otherwise good domain, and 2206 * does not support blocking domains, it should at least 2207 * drop its reference on the current domain so we don't 2208 * UAF. 2209 */ 2210 if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) 2211 continue; 2212 goto err_revert; 2213 } 2214 } 2215 group->domain = new_domain; 2216 return result; 2217 2218 err_revert: 2219 /* 2220 * This is called in error unwind paths. A well behaved driver should 2221 * always allow us to attach to a domain that was already attached. 2222 */ 2223 last_gdev = gdev; 2224 for_each_group_device(group, gdev) { 2225 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev); 2226 2227 /* 2228 * If set_platform_dma_ops is not present a NULL domain can 2229 * happen only for first probe, in which case we leave 2230 * group->domain as NULL and let release clean everything up. 2231 */ 2232 if (group->domain) 2233 WARN_ON(__iommu_device_set_domain( 2234 group, gdev->dev, group->domain, 2235 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2236 else if (ops->set_platform_dma_ops) 2237 ops->set_platform_dma_ops(gdev->dev); 2238 if (gdev == last_gdev) 2239 break; 2240 } 2241 return ret; 2242 } 2243 2244 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2245 { 2246 mutex_lock(&group->mutex); 2247 __iommu_group_set_core_domain(group); 2248 mutex_unlock(&group->mutex); 2249 } 2250 EXPORT_SYMBOL_GPL(iommu_detach_group); 2251 2252 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2253 { 2254 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2255 return iova; 2256 2257 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2258 return 0; 2259 2260 return domain->ops->iova_to_phys(domain, iova); 2261 } 2262 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2263 2264 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2265 phys_addr_t paddr, size_t size, size_t *count) 2266 { 2267 unsigned int pgsize_idx, pgsize_idx_next; 2268 unsigned long pgsizes; 2269 size_t offset, pgsize, pgsize_next; 2270 unsigned long addr_merge = paddr | iova; 2271 2272 /* Page sizes supported by the hardware and small enough for @size */ 2273 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2274 2275 /* Constrain the page sizes further based on the maximum alignment */ 2276 if (likely(addr_merge)) 2277 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2278 2279 /* Make sure we have at least one suitable page size */ 2280 BUG_ON(!pgsizes); 2281 2282 /* Pick the biggest page size remaining */ 2283 pgsize_idx = __fls(pgsizes); 2284 pgsize = BIT(pgsize_idx); 2285 if (!count) 2286 return pgsize; 2287 2288 /* Find the next biggest support page size, if it exists */ 2289 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2290 if (!pgsizes) 2291 goto out_set_count; 2292 2293 pgsize_idx_next = __ffs(pgsizes); 2294 pgsize_next = BIT(pgsize_idx_next); 2295 2296 /* 2297 * There's no point trying a bigger page size unless the virtual 2298 * and physical addresses are similarly offset within the larger page. 2299 */ 2300 if ((iova ^ paddr) & (pgsize_next - 1)) 2301 goto out_set_count; 2302 2303 /* Calculate the offset to the next page size alignment boundary */ 2304 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2305 2306 /* 2307 * If size is big enough to accommodate the larger page, reduce 2308 * the number of smaller pages. 
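	 *
	 * Worked example (illustrative numbers, assuming a pgsize_bitmap of
	 * SZ_4K | SZ_2M): for iova = paddr = 0x1ff000 and size = 0x401000,
	 * alignment limits pgsize to 4K, pgsize_next is 2M and offset is
	 * 0x1000, so a single 4K page is mapped now and the next call can
	 * use 2M pages from the aligned boundary onwards.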
2309 */ 2310 if (offset + pgsize_next <= size) 2311 size = offset; 2312 2313 out_set_count: 2314 *count = size >> pgsize_idx; 2315 return pgsize; 2316 } 2317 2318 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2319 phys_addr_t paddr, size_t size, int prot, 2320 gfp_t gfp, size_t *mapped) 2321 { 2322 const struct iommu_domain_ops *ops = domain->ops; 2323 size_t pgsize, count; 2324 int ret; 2325 2326 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2327 2328 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2329 iova, &paddr, pgsize, count); 2330 2331 if (ops->map_pages) { 2332 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2333 gfp, mapped); 2334 } else { 2335 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2336 *mapped = ret ? 0 : pgsize; 2337 } 2338 2339 return ret; 2340 } 2341 2342 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2343 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2344 { 2345 const struct iommu_domain_ops *ops = domain->ops; 2346 unsigned long orig_iova = iova; 2347 unsigned int min_pagesz; 2348 size_t orig_size = size; 2349 phys_addr_t orig_paddr = paddr; 2350 int ret = 0; 2351 2352 if (unlikely(!(ops->map || ops->map_pages) || 2353 domain->pgsize_bitmap == 0UL)) 2354 return -ENODEV; 2355 2356 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2357 return -EINVAL; 2358 2359 /* find out the minimum page size supported */ 2360 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2361 2362 /* 2363 * both the virtual address and the physical one, as well as 2364 * the size of the mapping, must be aligned (at least) to the 2365 * size of the smallest page supported by the hardware 2366 */ 2367 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2368 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2369 iova, &paddr, size, min_pagesz); 2370 return -EINVAL; 2371 } 2372 2373 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2374 2375 while (size) { 2376 size_t mapped = 0; 2377 2378 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2379 &mapped); 2380 /* 2381 * Some pages may have been mapped, even if an error occurred, 2382 * so we should account for those so they can be unmapped. 
2383 */ 2384 size -= mapped; 2385 2386 if (ret) 2387 break; 2388 2389 iova += mapped; 2390 paddr += mapped; 2391 } 2392 2393 /* unroll mapping in case something went wrong */ 2394 if (ret) 2395 iommu_unmap(domain, orig_iova, orig_size - size); 2396 else 2397 trace_map(orig_iova, orig_paddr, orig_size); 2398 2399 return ret; 2400 } 2401 2402 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2403 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2404 { 2405 const struct iommu_domain_ops *ops = domain->ops; 2406 int ret; 2407 2408 might_sleep_if(gfpflags_allow_blocking(gfp)); 2409 2410 /* Discourage passing strange GFP flags */ 2411 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2412 __GFP_HIGHMEM))) 2413 return -EINVAL; 2414 2415 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2416 if (ret == 0 && ops->iotlb_sync_map) 2417 ops->iotlb_sync_map(domain, iova, size); 2418 2419 return ret; 2420 } 2421 EXPORT_SYMBOL_GPL(iommu_map); 2422 2423 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2424 unsigned long iova, size_t size, 2425 struct iommu_iotlb_gather *iotlb_gather) 2426 { 2427 const struct iommu_domain_ops *ops = domain->ops; 2428 size_t pgsize, count; 2429 2430 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2431 return ops->unmap_pages ? 2432 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2433 ops->unmap(domain, iova, pgsize, iotlb_gather); 2434 } 2435 2436 static size_t __iommu_unmap(struct iommu_domain *domain, 2437 unsigned long iova, size_t size, 2438 struct iommu_iotlb_gather *iotlb_gather) 2439 { 2440 const struct iommu_domain_ops *ops = domain->ops; 2441 size_t unmapped_page, unmapped = 0; 2442 unsigned long orig_iova = iova; 2443 unsigned int min_pagesz; 2444 2445 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2446 domain->pgsize_bitmap == 0UL)) 2447 return 0; 2448 2449 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2450 return 0; 2451 2452 /* find out the minimum page size supported */ 2453 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2454 2455 /* 2456 * The virtual address, as well as the size of the mapping, must be 2457 * aligned (at least) to the size of the smallest page supported 2458 * by the hardware 2459 */ 2460 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2461 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2462 iova, size, min_pagesz); 2463 return 0; 2464 } 2465 2466 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2467 2468 /* 2469 * Keep iterating until we either unmap 'size' bytes (or more) 2470 * or we hit an area that isn't mapped. 
2471 */ 2472 while (unmapped < size) { 2473 unmapped_page = __iommu_unmap_pages(domain, iova, 2474 size - unmapped, 2475 iotlb_gather); 2476 if (!unmapped_page) 2477 break; 2478 2479 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2480 iova, unmapped_page); 2481 2482 iova += unmapped_page; 2483 unmapped += unmapped_page; 2484 } 2485 2486 trace_unmap(orig_iova, size, unmapped); 2487 return unmapped; 2488 } 2489 2490 size_t iommu_unmap(struct iommu_domain *domain, 2491 unsigned long iova, size_t size) 2492 { 2493 struct iommu_iotlb_gather iotlb_gather; 2494 size_t ret; 2495 2496 iommu_iotlb_gather_init(&iotlb_gather); 2497 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2498 iommu_iotlb_sync(domain, &iotlb_gather); 2499 2500 return ret; 2501 } 2502 EXPORT_SYMBOL_GPL(iommu_unmap); 2503 2504 size_t iommu_unmap_fast(struct iommu_domain *domain, 2505 unsigned long iova, size_t size, 2506 struct iommu_iotlb_gather *iotlb_gather) 2507 { 2508 return __iommu_unmap(domain, iova, size, iotlb_gather); 2509 } 2510 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2511 2512 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2513 struct scatterlist *sg, unsigned int nents, int prot, 2514 gfp_t gfp) 2515 { 2516 const struct iommu_domain_ops *ops = domain->ops; 2517 size_t len = 0, mapped = 0; 2518 phys_addr_t start; 2519 unsigned int i = 0; 2520 int ret; 2521 2522 might_sleep_if(gfpflags_allow_blocking(gfp)); 2523 2524 /* Discourage passing strange GFP flags */ 2525 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2526 __GFP_HIGHMEM))) 2527 return -EINVAL; 2528 2529 while (i <= nents) { 2530 phys_addr_t s_phys = sg_phys(sg); 2531 2532 if (len && s_phys != start + len) { 2533 ret = __iommu_map(domain, iova + mapped, start, 2534 len, prot, gfp); 2535 2536 if (ret) 2537 goto out_err; 2538 2539 mapped += len; 2540 len = 0; 2541 } 2542 2543 if (sg_dma_is_bus_address(sg)) 2544 goto next; 2545 2546 if (len) { 2547 len += sg->length; 2548 } else { 2549 len = sg->length; 2550 start = s_phys; 2551 } 2552 2553 next: 2554 if (++i < nents) 2555 sg = sg_next(sg); 2556 } 2557 2558 if (ops->iotlb_sync_map) 2559 ops->iotlb_sync_map(domain, iova, mapped); 2560 return mapped; 2561 2562 out_err: 2563 /* undo mappings already done */ 2564 iommu_unmap(domain, iova, mapped); 2565 2566 return ret; 2567 } 2568 EXPORT_SYMBOL_GPL(iommu_map_sg); 2569 2570 /** 2571 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2572 * @domain: the iommu domain where the fault has happened 2573 * @dev: the device where the fault has happened 2574 * @iova: the faulting address 2575 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2576 * 2577 * This function should be called by the low-level IOMMU implementations 2578 * whenever IOMMU faults happen, to allow high-level users, that are 2579 * interested in such events, to know about them. 2580 * 2581 * This event may be useful for several possible use cases: 2582 * - mere logging of the event 2583 * - dynamic TLB/PTE loading 2584 * - if restarting of the faulting device is required 2585 * 2586 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2587 * PTE/TLB loading will one day be supported, implementations will be able 2588 * to tell whether it succeeded or not according to this return value). 
2589  *
2590  * Specifically, -ENOSYS is returned if a fault handler isn't installed
2591  * (though fault handlers can also return -ENOSYS, in case they want to
2592  * elicit the default behavior of the IOMMU drivers).
2593  */
2594 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2595 		       unsigned long iova, int flags)
2596 {
2597 	int ret = -ENOSYS;
2598 
2599 	/*
2600 	 * If upper layers showed interest and installed a fault handler,
2601 	 * invoke it.
2602 	 */
2603 	if (domain->handler)
2604 		ret = domain->handler(domain, dev, iova, flags,
2605 				      domain->handler_token);
2606 
2607 	trace_io_page_fault(dev, iova, flags);
2608 	return ret;
2609 }
2610 EXPORT_SYMBOL_GPL(report_iommu_fault);
2611 
2612 static int __init iommu_init(void)
2613 {
2614 	iommu_group_kset = kset_create_and_add("iommu_groups",
2615 					       NULL, kernel_kobj);
2616 	BUG_ON(!iommu_group_kset);
2617 
2618 	iommu_debugfs_setup();
2619 
2620 	return 0;
2621 }
2622 core_initcall(iommu_init);
2623 
2624 int iommu_enable_nesting(struct iommu_domain *domain)
2625 {
2626 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2627 		return -EINVAL;
2628 	if (!domain->ops->enable_nesting)
2629 		return -EINVAL;
2630 	return domain->ops->enable_nesting(domain);
2631 }
2632 EXPORT_SYMBOL_GPL(iommu_enable_nesting);
2633 
2634 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2635 			     unsigned long quirk)
2636 {
2637 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2638 		return -EINVAL;
2639 	if (!domain->ops->set_pgtable_quirks)
2640 		return -EINVAL;
2641 	return domain->ops->set_pgtable_quirks(domain, quirk);
2642 }
2643 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
2644 
2645 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2646 {
2647 	const struct iommu_ops *ops = dev_iommu_ops(dev);
2648 
2649 	if (ops->get_resv_regions)
2650 		ops->get_resv_regions(dev, list);
2651 }
2652 
2653 /**
2654  * iommu_put_resv_regions - release reserved regions
2655  * @dev: device for which to free reserved regions
2656  * @list: reserved region list for device
2657  *
2658  * This releases a reserved region list acquired by iommu_get_resv_regions().
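 *
 * Typical get/put pairing (sketch):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		(... inspect region->start, region->length, region->type ...)
 *	iommu_put_resv_regions(dev, &resv_regions);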
2659 */ 2660 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2661 { 2662 struct iommu_resv_region *entry, *next; 2663 2664 list_for_each_entry_safe(entry, next, list, list) { 2665 if (entry->free) 2666 entry->free(dev, entry); 2667 else 2668 kfree(entry); 2669 } 2670 } 2671 EXPORT_SYMBOL(iommu_put_resv_regions); 2672 2673 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2674 size_t length, int prot, 2675 enum iommu_resv_type type, 2676 gfp_t gfp) 2677 { 2678 struct iommu_resv_region *region; 2679 2680 region = kzalloc(sizeof(*region), gfp); 2681 if (!region) 2682 return NULL; 2683 2684 INIT_LIST_HEAD(®ion->list); 2685 region->start = start; 2686 region->length = length; 2687 region->prot = prot; 2688 region->type = type; 2689 return region; 2690 } 2691 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2692 2693 void iommu_set_default_passthrough(bool cmd_line) 2694 { 2695 if (cmd_line) 2696 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2697 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2698 } 2699 2700 void iommu_set_default_translated(bool cmd_line) 2701 { 2702 if (cmd_line) 2703 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2704 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2705 } 2706 2707 bool iommu_default_passthrough(void) 2708 { 2709 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2710 } 2711 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2712 2713 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2714 { 2715 const struct iommu_ops *ops = NULL; 2716 struct iommu_device *iommu; 2717 2718 spin_lock(&iommu_device_lock); 2719 list_for_each_entry(iommu, &iommu_device_list, list) 2720 if (iommu->fwnode == fwnode) { 2721 ops = iommu->ops; 2722 break; 2723 } 2724 spin_unlock(&iommu_device_lock); 2725 return ops; 2726 } 2727 2728 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2729 const struct iommu_ops *ops) 2730 { 2731 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2732 2733 if (fwspec) 2734 return ops == fwspec->ops ? 0 : -EINVAL; 2735 2736 if (!dev_iommu_get(dev)) 2737 return -ENOMEM; 2738 2739 /* Preallocate for the overwhelmingly common case of 1 ID */ 2740 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2741 if (!fwspec) 2742 return -ENOMEM; 2743 2744 of_node_get(to_of_node(iommu_fwnode)); 2745 fwspec->iommu_fwnode = iommu_fwnode; 2746 fwspec->ops = ops; 2747 dev_iommu_fwspec_set(dev, fwspec); 2748 return 0; 2749 } 2750 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2751 2752 void iommu_fwspec_free(struct device *dev) 2753 { 2754 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2755 2756 if (fwspec) { 2757 fwnode_handle_put(fwspec->iommu_fwnode); 2758 kfree(fwspec); 2759 dev_iommu_fwspec_set(dev, NULL); 2760 } 2761 } 2762 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2763 2764 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2765 { 2766 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2767 int i, new_num; 2768 2769 if (!fwspec) 2770 return -EINVAL; 2771 2772 new_num = fwspec->num_ids + num_ids; 2773 if (new_num > 1) { 2774 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2775 GFP_KERNEL); 2776 if (!fwspec) 2777 return -ENOMEM; 2778 2779 dev_iommu_fwspec_set(dev, fwspec); 2780 } 2781 2782 for (i = 0; i < num_ids; i++) 2783 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2784 2785 fwspec->num_ids = new_num; 2786 return 0; 2787 } 2788 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2789 2790 /* 2791 * Per device IOMMU features. 
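 *
 * For example (sketch), a driver that wants to use SVA would typically do:
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		return -ENODEV;
 *	(... bind address spaces and issue PASID-tagged DMA ...)
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);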
2792 */ 2793 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2794 { 2795 if (dev->iommu && dev->iommu->iommu_dev) { 2796 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2797 2798 if (ops->dev_enable_feat) 2799 return ops->dev_enable_feat(dev, feat); 2800 } 2801 2802 return -ENODEV; 2803 } 2804 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2805 2806 /* 2807 * The device drivers should do the necessary cleanups before calling this. 2808 */ 2809 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2810 { 2811 if (dev->iommu && dev->iommu->iommu_dev) { 2812 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2813 2814 if (ops->dev_disable_feat) 2815 return ops->dev_disable_feat(dev, feat); 2816 } 2817 2818 return -EBUSY; 2819 } 2820 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2821 2822 /** 2823 * iommu_setup_default_domain - Set the default_domain for the group 2824 * @group: Group to change 2825 * @target_type: Domain type to set as the default_domain 2826 * 2827 * Allocate a default domain and set it as the current domain on the group. If 2828 * the group already has a default domain it will be changed to the target_type. 2829 * When target_type is 0 the default domain is selected based on driver and 2830 * system preferences. 2831 */ 2832 static int iommu_setup_default_domain(struct iommu_group *group, 2833 int target_type) 2834 { 2835 struct iommu_domain *old_dom = group->default_domain; 2836 struct group_device *gdev; 2837 struct iommu_domain *dom; 2838 bool direct_failed; 2839 int req_type; 2840 int ret; 2841 2842 lockdep_assert_held(&group->mutex); 2843 2844 req_type = iommu_get_default_domain_type(group, target_type); 2845 if (req_type < 0) 2846 return -EINVAL; 2847 2848 /* 2849 * There are still some drivers which don't support default domains, so 2850 * we ignore the failure and leave group->default_domain NULL. 2851 * 2852 * We assume that the iommu driver starts up the device in 2853 * 'set_platform_dma_ops' mode if it does not support default domains. 2854 */ 2855 dom = iommu_group_alloc_default_domain(group, req_type); 2856 if (!dom) { 2857 /* Once in default_domain mode we never leave */ 2858 if (group->default_domain) 2859 return -ENODEV; 2860 group->default_domain = NULL; 2861 return 0; 2862 } 2863 2864 if (group->default_domain == dom) 2865 return 0; 2866 2867 /* 2868 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 2869 * mapped before their device is attached, in order to guarantee 2870 * continuity with any FW activity 2871 */ 2872 direct_failed = false; 2873 for_each_group_device(group, gdev) { 2874 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 2875 direct_failed = true; 2876 dev_warn_once( 2877 gdev->dev->iommu->iommu_dev->dev, 2878 "IOMMU driver was not able to establish FW requested direct mapping."); 2879 } 2880 } 2881 2882 /* We must set default_domain early for __iommu_device_set_domain */ 2883 group->default_domain = dom; 2884 if (!group->domain) { 2885 /* 2886 * Drivers are not allowed to fail the first domain attach. 2887 * The only way to recover from this is to fail attaching the 2888 * iommu driver and call ops->release_device. Put the domain 2889 * in group->default_domain so it is freed after. 
2890 */ 2891 ret = __iommu_group_set_domain_internal( 2892 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 2893 if (WARN_ON(ret)) 2894 goto out_free_old; 2895 } else { 2896 ret = __iommu_group_set_domain(group, dom); 2897 if (ret) 2898 goto err_restore_def_domain; 2899 } 2900 2901 /* 2902 * Drivers are supposed to allow mappings to be installed in a domain 2903 * before device attachment, but some don't. Hack around this defect by 2904 * trying again after attaching. If this happens it means the device 2905 * will not continuously have the IOMMU_RESV_DIRECT map. 2906 */ 2907 if (direct_failed) { 2908 for_each_group_device(group, gdev) { 2909 ret = iommu_create_device_direct_mappings(dom, gdev->dev); 2910 if (ret) 2911 goto err_restore_domain; 2912 } 2913 } 2914 2915 out_free_old: 2916 if (old_dom) 2917 iommu_domain_free(old_dom); 2918 return ret; 2919 2920 err_restore_domain: 2921 if (old_dom) 2922 __iommu_group_set_domain_internal( 2923 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 2924 err_restore_def_domain: 2925 if (old_dom) { 2926 iommu_domain_free(dom); 2927 group->default_domain = old_dom; 2928 } 2929 return ret; 2930 } 2931 2932 /* 2933 * Changing the default domain through sysfs requires the users to unbind the 2934 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 2935 * transition. Return failure if this isn't met. 2936 * 2937 * We need to consider the race between this and the device release path. 2938 * group->mutex is used here to guarantee that the device release path 2939 * will not be entered at the same time. 2940 */ 2941 static ssize_t iommu_group_store_type(struct iommu_group *group, 2942 const char *buf, size_t count) 2943 { 2944 struct group_device *gdev; 2945 int ret, req_type; 2946 2947 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2948 return -EACCES; 2949 2950 if (WARN_ON(!group) || !group->default_domain) 2951 return -EINVAL; 2952 2953 if (sysfs_streq(buf, "identity")) 2954 req_type = IOMMU_DOMAIN_IDENTITY; 2955 else if (sysfs_streq(buf, "DMA")) 2956 req_type = IOMMU_DOMAIN_DMA; 2957 else if (sysfs_streq(buf, "DMA-FQ")) 2958 req_type = IOMMU_DOMAIN_DMA_FQ; 2959 else if (sysfs_streq(buf, "auto")) 2960 req_type = 0; 2961 else 2962 return -EINVAL; 2963 2964 mutex_lock(&group->mutex); 2965 /* We can bring up a flush queue without tearing down the domain. */ 2966 if (req_type == IOMMU_DOMAIN_DMA_FQ && 2967 group->default_domain->type == IOMMU_DOMAIN_DMA) { 2968 ret = iommu_dma_init_fq(group->default_domain); 2969 if (ret) 2970 goto out_unlock; 2971 2972 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; 2973 ret = count; 2974 goto out_unlock; 2975 } 2976 2977 /* Otherwise, ensure that device exists and no driver is bound. */ 2978 if (list_empty(&group->devices) || group->owner_cnt) { 2979 ret = -EPERM; 2980 goto out_unlock; 2981 } 2982 2983 ret = iommu_setup_default_domain(group, req_type); 2984 if (ret) 2985 goto out_unlock; 2986 2987 /* 2988 * Release the mutex here because ops->probe_finalize() call-back of 2989 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 2990 * in-turn might call back into IOMMU core code, where it tries to take 2991 * group->mutex, resulting in a deadlock. 
2992 	 */
2993 	mutex_unlock(&group->mutex);
2994 
2995 	/* Make sure dma_ops is appropriately set */
2996 	for_each_group_device(group, gdev)
2997 		iommu_group_do_probe_finalize(gdev->dev);
2998 	return count;
2999 
3000 out_unlock:
3001 	mutex_unlock(&group->mutex);
3002 	return ret ?: count;
3003 }
3004 
3005 static bool iommu_is_default_domain(struct iommu_group *group)
3006 {
3007 	if (group->domain == group->default_domain)
3008 		return true;
3009 
3010 	/*
3011 	 * If the default domain was set to identity and it is still an identity
3012 	 * domain then we consider this a pass. This happens because of
3013 	 * amd_iommu_init_device() replacing the default identity domain with an
3014 	 * identity domain that has a different configuration for AMDGPU.
3015 	 */
3016 	if (group->default_domain &&
3017 	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
3018 	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
3019 		return true;
3020 	return false;
3021 }
3022 
3023 /**
3024  * iommu_device_use_default_domain() - Device driver wants to handle device
3025  *                                     DMA through the kernel DMA API.
3026  * @dev: The device.
3027  *
3028  * The device driver about to bind @dev wants to do DMA through the kernel
3029  * DMA API. Return 0 if it is allowed, otherwise an error.
3030  */
3031 int iommu_device_use_default_domain(struct device *dev)
3032 {
3033 	struct iommu_group *group = iommu_group_get(dev);
3034 	int ret = 0;
3035 
3036 	if (!group)
3037 		return 0;
3038 
3039 	mutex_lock(&group->mutex);
3040 	if (group->owner_cnt) {
3041 		if (group->owner || !iommu_is_default_domain(group) ||
3042 		    !xa_empty(&group->pasid_array)) {
3043 			ret = -EBUSY;
3044 			goto unlock_out;
3045 		}
3046 	}
3047 
3048 	group->owner_cnt++;
3049 
3050 unlock_out:
3051 	mutex_unlock(&group->mutex);
3052 	iommu_group_put(group);
3053 
3054 	return ret;
3055 }
3056 
3057 /**
3058  * iommu_device_unuse_default_domain() - Device driver stops handling device
3059  *                                       DMA through the kernel DMA API.
3060  * @dev: The device.
3061  *
3062  * The device driver doesn't want to do DMA through kernel DMA API anymore.
3063  * It must be called after iommu_device_use_default_domain().
3064  */
3065 void iommu_device_unuse_default_domain(struct device *dev)
3066 {
3067 	struct iommu_group *group = iommu_group_get(dev);
3068 
3069 	if (!group)
3070 		return;
3071 
3072 	mutex_lock(&group->mutex);
3073 	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
3074 		group->owner_cnt--;
3075 
3076 	mutex_unlock(&group->mutex);
3077 	iommu_group_put(group);
3078 }
3079 
3080 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
3081 {
3082 	struct group_device *dev =
3083 		list_first_entry(&group->devices, struct group_device, list);
3084 
3085 	if (group->blocking_domain)
3086 		return 0;
3087 
3088 	group->blocking_domain =
3089 		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
3090 	if (!group->blocking_domain) {
3091 		/*
3092 		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED,
3093 		 * create an empty domain instead.
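		 * An empty unmanaged domain has no mappings installed, so any
		 * DMA routed through it faults, which approximates the
		 * blocking behaviour.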
3094 		 */
3095 		group->blocking_domain = __iommu_domain_alloc(
3096 			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
3097 		if (!group->blocking_domain)
3098 			return -EINVAL;
3099 	}
3100 	return 0;
3101 }
3102 
3103 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
3104 {
3105 	int ret;
3106 
3107 	if ((group->domain && group->domain != group->default_domain) ||
3108 	    !xa_empty(&group->pasid_array))
3109 		return -EBUSY;
3110 
3111 	ret = __iommu_group_alloc_blocking_domain(group);
3112 	if (ret)
3113 		return ret;
3114 	ret = __iommu_group_set_domain(group, group->blocking_domain);
3115 	if (ret)
3116 		return ret;
3117 
3118 	group->owner = owner;
3119 	group->owner_cnt++;
3120 	return 0;
3121 }
3122 
3123 /**
3124  * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3125  * @group: The group.
3126  * @owner: Caller specified pointer. Used for exclusive ownership.
3127  *
3128  * This is to support backward compatibility for vfio which manages the DMA
3129  * ownership at the iommu_group level. New invocations on this interface should
3130  * be prohibited. Only a single owner may exist for a group.
3131  */
3132 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
3133 {
3134 	int ret = 0;
3135 
3136 	if (WARN_ON(!owner))
3137 		return -EINVAL;
3138 
3139 	mutex_lock(&group->mutex);
3140 	if (group->owner_cnt) {
3141 		ret = -EPERM;
3142 		goto unlock_out;
3143 	}
3144 
3145 	ret = __iommu_take_dma_ownership(group, owner);
3146 unlock_out:
3147 	mutex_unlock(&group->mutex);
3148 
3149 	return ret;
3150 }
3151 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
3152 
3153 /**
3154  * iommu_device_claim_dma_owner() - Set DMA ownership of a device
3155  * @dev: The device.
3156  * @owner: Caller specified pointer. Used for exclusive ownership.
3157  *
3158  * Claim the DMA ownership of a device. Multiple devices in the same group may
3159  * concurrently claim ownership if they present the same owner value. Returns 0
3160  * on success and error code on failure.
3161  */
3162 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
3163 {
3164 	struct iommu_group *group;
3165 	int ret = 0;
3166 
3167 	if (WARN_ON(!owner))
3168 		return -EINVAL;
3169 
3170 	group = iommu_group_get(dev);
3171 	if (!group)
3172 		return -ENODEV;
3173 
3174 	mutex_lock(&group->mutex);
3175 	if (group->owner_cnt) {
3176 		if (group->owner != owner) {
3177 			ret = -EPERM;
3178 			goto unlock_out;
3179 		}
3180 		group->owner_cnt++;
3181 		goto unlock_out;
3182 	}
3183 
3184 	ret = __iommu_take_dma_ownership(group, owner);
3185 unlock_out:
3186 	mutex_unlock(&group->mutex);
3187 	iommu_group_put(group);
3188 
3189 	return ret;
3190 }
3191 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
3192 
3193 static void __iommu_release_dma_ownership(struct iommu_group *group)
3194 {
3195 	if (WARN_ON(!group->owner_cnt || !group->owner ||
3196 		    !xa_empty(&group->pasid_array)))
3197 		return;
3198 
3199 	group->owner_cnt = 0;
3200 	group->owner = NULL;
3201 	__iommu_group_set_domain_nofail(group, group->default_domain);
3202 }
3203 
3204 /**
3205  * iommu_group_release_dma_owner() - Release DMA ownership of a group
3206  * @group: The group.
3207  *
3208  * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
3209  */
3210 void iommu_group_release_dma_owner(struct iommu_group *group)
3211 {
3212 	mutex_lock(&group->mutex);
3213 	__iommu_release_dma_ownership(group);
3214 	mutex_unlock(&group->mutex);
3215 }
3216 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
3217 
3218 /**
3219  * iommu_device_release_dma_owner() - Release DMA ownership of a device
3220  * @dev: The device.
3221 * 3222 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3223 */ 3224 void iommu_device_release_dma_owner(struct device *dev) 3225 { 3226 struct iommu_group *group = iommu_group_get(dev); 3227 3228 mutex_lock(&group->mutex); 3229 if (group->owner_cnt > 1) 3230 group->owner_cnt--; 3231 else 3232 __iommu_release_dma_ownership(group); 3233 mutex_unlock(&group->mutex); 3234 iommu_group_put(group); 3235 } 3236 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3237 3238 /** 3239 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3240 * @group: The group. 3241 * 3242 * This provides status query on a given group. It is racy and only for 3243 * non-binding status reporting. 3244 */ 3245 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3246 { 3247 unsigned int user; 3248 3249 mutex_lock(&group->mutex); 3250 user = group->owner_cnt; 3251 mutex_unlock(&group->mutex); 3252 3253 return user; 3254 } 3255 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3256 3257 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3258 struct iommu_group *group, ioasid_t pasid) 3259 { 3260 struct group_device *device; 3261 int ret = 0; 3262 3263 for_each_group_device(group, device) { 3264 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3265 if (ret) 3266 break; 3267 } 3268 3269 return ret; 3270 } 3271 3272 static void __iommu_remove_group_pasid(struct iommu_group *group, 3273 ioasid_t pasid) 3274 { 3275 struct group_device *device; 3276 const struct iommu_ops *ops; 3277 3278 for_each_group_device(group, device) { 3279 ops = dev_iommu_ops(device->dev); 3280 ops->remove_dev_pasid(device->dev, pasid); 3281 } 3282 } 3283 3284 /* 3285 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3286 * @domain: the iommu domain. 3287 * @dev: the attached device. 3288 * @pasid: the pasid of the device. 3289 * 3290 * Return: 0 on success, or an error. 3291 */ 3292 int iommu_attach_device_pasid(struct iommu_domain *domain, 3293 struct device *dev, ioasid_t pasid) 3294 { 3295 struct iommu_group *group; 3296 void *curr; 3297 int ret; 3298 3299 if (!domain->ops->set_dev_pasid) 3300 return -EOPNOTSUPP; 3301 3302 group = iommu_group_get(dev); 3303 if (!group) 3304 return -ENODEV; 3305 3306 mutex_lock(&group->mutex); 3307 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3308 if (curr) { 3309 ret = xa_err(curr) ? : -EBUSY; 3310 goto out_unlock; 3311 } 3312 3313 ret = __iommu_set_group_pasid(domain, group, pasid); 3314 if (ret) { 3315 __iommu_remove_group_pasid(group, pasid); 3316 xa_erase(&group->pasid_array, pasid); 3317 } 3318 out_unlock: 3319 mutex_unlock(&group->mutex); 3320 iommu_group_put(group); 3321 3322 return ret; 3323 } 3324 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3325 3326 /* 3327 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3328 * @domain: the iommu domain. 3329 * @dev: the attached device. 3330 * @pasid: the pasid of the device. 3331 * 3332 * The @domain must have been attached to @pasid of the @dev with 3333 * iommu_attach_device_pasid(). 
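 *
 * Illustrative pairing with the attach side (sketch, error handling trimmed):
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret)
 *		return ret;
 *	(... DMA tagged with "pasid" is now translated by "domain" ...)
 *	iommu_detach_device_pasid(domain, dev, pasid);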
3334 */ 3335 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3336 ioasid_t pasid) 3337 { 3338 struct iommu_group *group = iommu_group_get(dev); 3339 3340 mutex_lock(&group->mutex); 3341 __iommu_remove_group_pasid(group, pasid); 3342 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3343 mutex_unlock(&group->mutex); 3344 3345 iommu_group_put(group); 3346 } 3347 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3348 3349 /* 3350 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3351 * @dev: the queried device 3352 * @pasid: the pasid of the device 3353 * @type: matched domain type, 0 for any match 3354 * 3355 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3356 * domain attached to pasid of a device. Callers must hold a lock around this 3357 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3358 * type is being manipulated. This API does not internally resolve races with 3359 * attach/detach. 3360 * 3361 * Return: attached domain on success, NULL otherwise. 3362 */ 3363 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3364 ioasid_t pasid, 3365 unsigned int type) 3366 { 3367 struct iommu_domain *domain; 3368 struct iommu_group *group; 3369 3370 group = iommu_group_get(dev); 3371 if (!group) 3372 return NULL; 3373 3374 xa_lock(&group->pasid_array); 3375 domain = xa_load(&group->pasid_array, pasid); 3376 if (type && domain && domain->type != type) 3377 domain = ERR_PTR(-EBUSY); 3378 xa_unlock(&group->pasid_array); 3379 iommu_group_put(group); 3380 3381 return domain; 3382 } 3383 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3384 3385 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3386 struct mm_struct *mm) 3387 { 3388 const struct iommu_ops *ops = dev_iommu_ops(dev); 3389 struct iommu_domain *domain; 3390 3391 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3392 if (!domain) 3393 return NULL; 3394 3395 domain->type = IOMMU_DOMAIN_SVA; 3396 mmgrab(mm); 3397 domain->mm = mm; 3398 domain->iopf_handler = iommu_sva_handle_iopf; 3399 domain->fault_data = mm; 3400 3401 return domain; 3402 } 3403
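/*
 * Illustrative SVA flow built from the helpers above (editorial sketch only;
 * "pasid" is assumed to have been allocated by the caller and error handling
 * is trimmed):
 *
 *	struct iommu_domain *domain = iommu_sva_domain_alloc(dev, current->mm);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device_pasid(domain, dev, pasid))
 *		goto err_free;
 *	(... DMA tagged with "pasid" now shares the CPU page tables of the mm ...)
 *	iommu_detach_device_pasid(domain, dev, pasid);
 *	iommu_domain_free(domain);
 */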