// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);

enum {
	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
#ifdef CONFIG_CDX_BUS
	&cdx_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");
214 " (set via kernel command line)" : ""); 215 216 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 217 if (!nb) 218 return -ENOMEM; 219 220 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 221 nb[i].notifier_call = iommu_bus_notifier; 222 bus_register_notifier(iommu_buses[i], &nb[i]); 223 } 224 225 return 0; 226 } 227 subsys_initcall(iommu_subsys_init); 228 229 static int remove_iommu_group(struct device *dev, void *data) 230 { 231 if (dev->iommu && dev->iommu->iommu_dev == data) 232 iommu_release_device(dev); 233 234 return 0; 235 } 236 237 /** 238 * iommu_device_register() - Register an IOMMU hardware instance 239 * @iommu: IOMMU handle for the instance 240 * @ops: IOMMU ops to associate with the instance 241 * @hwdev: (optional) actual instance device, used for fwnode lookup 242 * 243 * Return: 0 on success, or an error. 244 */ 245 int iommu_device_register(struct iommu_device *iommu, 246 const struct iommu_ops *ops, struct device *hwdev) 247 { 248 int err = 0; 249 250 /* We need to be able to take module references appropriately */ 251 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 252 return -EINVAL; 253 /* 254 * Temporarily enforce global restriction to a single driver. This was 255 * already the de-facto behaviour, since any possible combination of 256 * existing drivers would compete for at least the PCI or platform bus. 257 */ 258 if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) 259 return -EBUSY; 260 261 iommu->ops = ops; 262 if (hwdev) 263 iommu->fwnode = dev_fwnode(hwdev); 264 265 spin_lock(&iommu_device_lock); 266 list_add_tail(&iommu->list, &iommu_device_list); 267 spin_unlock(&iommu_device_lock); 268 269 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { 270 iommu_buses[i]->iommu_ops = ops; 271 err = bus_iommu_probe(iommu_buses[i]); 272 } 273 if (err) 274 iommu_device_unregister(iommu); 275 return err; 276 } 277 EXPORT_SYMBOL_GPL(iommu_device_register); 278 279 void iommu_device_unregister(struct iommu_device *iommu) 280 { 281 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 282 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 283 284 spin_lock(&iommu_device_lock); 285 list_del(&iommu->list); 286 spin_unlock(&iommu_device_lock); 287 } 288 EXPORT_SYMBOL_GPL(iommu_device_unregister); 289 290 static struct dev_iommu *dev_iommu_get(struct device *dev) 291 { 292 struct dev_iommu *param = dev->iommu; 293 294 if (param) 295 return param; 296 297 param = kzalloc(sizeof(*param), GFP_KERNEL); 298 if (!param) 299 return NULL; 300 301 mutex_init(¶m->lock); 302 dev->iommu = param; 303 return param; 304 } 305 306 static void dev_iommu_free(struct device *dev) 307 { 308 struct dev_iommu *param = dev->iommu; 309 310 dev->iommu = NULL; 311 if (param->fwspec) { 312 fwnode_handle_put(param->fwspec->iommu_fwnode); 313 kfree(param->fwspec); 314 } 315 kfree(param); 316 } 317 318 static u32 dev_iommu_get_max_pasids(struct device *dev) 319 { 320 u32 max_pasids = 0, bits = 0; 321 int ret; 322 323 if (dev_is_pci(dev)) { 324 ret = pci_max_pasids(to_pci_dev(dev)); 325 if (ret > 0) 326 max_pasids = ret; 327 } else { 328 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); 329 if (!ret) 330 max_pasids = 1UL << bits; 331 } 332 333 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); 334 } 335 336 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 337 { 338 const struct iommu_ops *ops = dev->bus->iommu_ops; 339 struct iommu_device *iommu_dev; 340 struct iommu_group 

void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	static DEFINE_MUTEX(iommu_probe_device_lock);
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	mutex_lock(&iommu_probe_device_lock);

	/* Device is probed already if in a group */
	if (dev->iommu_group) {
		ret = 0;
		goto out_unlock;
	}

	if (!dev_iommu_get(dev)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}

	mutex_lock(&group->mutex);
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	mutex_unlock(&iommu_probe_device_lock);
	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

out_unlock:
	mutex_unlock(&iommu_probe_device_lock);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	mutex_lock(&group->mutex);

	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);

	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_unlock;
	} else if (!group->default_domain) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_unlock;
	}

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

/*
 * Remove a device from a group's device list and return the group device
 * if successful.
 */
static struct group_device *
__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *device;

	lockdep_assert_held(&group->mutex);
	for_each_group_device(group, device) {
		if (device->dev == dev) {
			list_del(&device->list);
			return device;
		}
	}

	return NULL;
}

/*
 * Release a device from its group and decrement the iommu group reference
 * count.
 */
static void __iommu_group_release_device(struct iommu_group *group,
					 struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(grp_dev->name);
	kfree(grp_dev);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;

	if (!dev->iommu || !group)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);

	/*
	 * If the group has become empty then ownership must have been
	 * released, and the current domain must be set back to NULL or the
	 * default domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
	 *
	 * The IOMMU driver must set the device to either an identity or
	 * blocking translation and stop using any domain pointer, as it is
	 * going to be freed.
	 */
	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);

	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);
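
/*
 * For reference, both parameters above are consumed from the kernel
 * command line, e.g. (illustrative): booting with
 * "iommu.passthrough=0 iommu.strict=1" forces translated default
 * domains with strict TLB invalidation.
 */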

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sysfs_emit(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	int offset = 0;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return offset;
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static const struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
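
/*
 * Illustrative use from a driver's ->device_group() callback (sketch
 * only; error handling elided):
 *
 *	struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		return iommu_group_alloc();
 *	}
 *
 * which is exactly what generic_device_group() further below provides.
 */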

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
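
/*
 * Illustrative pairing of the iommu_data helpers above (sketch only;
 * "my_data_free" is a placeholder release callback):
 *
 *	data = kzalloc(sizeof(*data), GFP_KERNEL);
 *	iommu_group_set_iommudata(group, data, my_data_free);
 *	...
 *	data = iommu_group_get_iommudata(group);
 *
 * The release callback runs when the group itself is released, so the
 * driver must not free the data while it is still registered.
 */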

static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}

	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);
	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
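
/*
 * Illustrative consumer flow (sketch only; "my_handler" and "my_data"
 * are placeholders):
 *
 *	static int my_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// queue recoverable faults here, complete them later
 *		// with iommu_page_response()
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_handler, my_data);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */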

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
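
/*
 * A consumer completing a recoverable fault might build its response
 * like this (illustrative sketch only; the values depend on the queued
 * request being answered):
 *
 *	struct iommu_page_response msg = {
 *		.version = IOMMU_PAGE_RESP_VERSION_1,
 *		.grpid   = fault->prm.grpid,
 *		.code    = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	iommu_page_response(dev, &msg);
 */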

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
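
/*
 * An IOMMU driver typically selects between the *_device_group() helpers
 * when filling in its ops, e.g. (illustrative; "my_ops" is a placeholder):
 *
 *	static const struct iommu_ops my_ops = {
 *		...
 *		.device_group = pci_device_group,
 *	};
 *
 * generic_device_group() above and fsl_mc_device_group() below serve the
 * same role for buses without PCI topology.
 */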

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static struct iommu_domain *
__iommu_group_alloc_default_domain(const struct bus_type *bus,
				   struct iommu_group *group, int req_type)
{
	if (group->default_domain && group->default_domain->type == req_type)
		return group->default_domain;
	return __iommu_domain_alloc(bus, req_type);
}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	const struct bus_type *bus =
		list_first_entry(&group->devices, struct group_device, list)
			->dev->bus;
	struct iommu_domain *dom;

	lockdep_assert_held(&group->mutex);

	if (req_type)
		return __iommu_group_alloc_default_domain(bus, group, req_type);

	/* The driver gave no guidance on what type to use, try the default */
	dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
	if (dom)
		return dom;

	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
		return NULL;
	dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
	if (!dom)
		return NULL;

	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
		iommu_def_domain_type, group->name);
	return dom;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	int ret;

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

/* A target_type of 0 will select the best domain type and cannot fail */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	int best_type = target_type;
	struct group_device *gdev;
	struct device *last_dev;

	lockdep_assert_held(&group->mutex);

	for_each_group_device(group, gdev) {
		unsigned int type = iommu_get_def_domain_type(gdev->dev);

		if (best_type && type && best_type != type) {
			if (target_type) {
				dev_err_ratelimited(
					gdev->dev,
					"Device cannot be in %s domain\n",
					iommu_domain_type_str(target_type));
				return -1;
			}

			dev_warn(
				gdev->dev,
				"Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				iommu_domain_type_str(type), dev_name(last_dev),
				iommu_domain_type_str(best_type));
			return 0;
		}
		if (!best_type)
			best_type = type;
		last_dev = gdev->dev;
	}
	return best_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
		 */
		for_each_group_device(group, gdev)
			iommu_group_do_probe_finalize(gdev->dev);
	}

	return 0;
}

bool iommu_present(const struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
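
/*
 * Callers use this to gate optional behaviour, e.g. (illustrative):
 *
 *	if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		// safe to rely on coherent DMA translations
 */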

/**
 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
 *	 for a group
 * @group: Group to query
 *
 * IOMMU groups should not have differing values of
 * msi_device_has_isolated_msi() for devices in a group. However nothing
 * directly prevents this, so ensure mistakes don't result in isolation failures
 * by checking that all the devices are the same.
 */
bool iommu_group_has_isolated_msi(struct iommu_group *group)
{
	struct group_device *group_dev;
	bool ret = true;

	mutex_lock(&group->mutex);
	for_each_group_device(group, group_dev)
		ret &= msi_device_has_isolated_msi(group_dev->dev);
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;
	unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(alloc_type);
	if (!domain)
		return NULL;

	domain->type = type;
	/*
	 * If not already set, assume all sizes by default; the driver
	 * may override this later
	 */
	if (!domain->pgsize_bitmap)
		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	iommu_put_dma_cookie(domain);
	domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
	struct iommu_domain *new_domain;

	if (group->owner)
		new_domain = group->blocking_domain;
	else
		new_domain = group->default_domain;

	__iommu_group_set_domain_nofail(group, new_domain);
}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (ret)
		return ret;
	dev->iommu->attach_deferred = 0;
	trace_attach_device_to_domain(dev);
	return 0;
}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (list_count_nodes(&group->devices) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
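
/*
 * Illustrative consumer flow for a single-device group (sketch only;
 * the IOVA, physical address and size values are placeholders):
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	if (!ret)
 *		ret = iommu_map(domain, iova, paddr, SZ_4K,
 *				IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */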
1999 */ 2000 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2001 { 2002 struct iommu_group *group; 2003 int ret; 2004 2005 group = iommu_group_get(dev); 2006 if (!group) 2007 return -ENODEV; 2008 2009 /* 2010 * Lock the group to make sure the device-count doesn't 2011 * change while we are attaching 2012 */ 2013 mutex_lock(&group->mutex); 2014 ret = -EINVAL; 2015 if (list_count_nodes(&group->devices) != 1) 2016 goto out_unlock; 2017 2018 ret = __iommu_attach_group(domain, group); 2019 2020 out_unlock: 2021 mutex_unlock(&group->mutex); 2022 iommu_group_put(group); 2023 2024 return ret; 2025 } 2026 EXPORT_SYMBOL_GPL(iommu_attach_device); 2027 2028 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2029 { 2030 if (dev->iommu && dev->iommu->attach_deferred) 2031 return __iommu_attach_device(domain, dev); 2032 2033 return 0; 2034 } 2035 2036 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2037 { 2038 struct iommu_group *group; 2039 2040 group = iommu_group_get(dev); 2041 if (!group) 2042 return; 2043 2044 mutex_lock(&group->mutex); 2045 if (WARN_ON(domain != group->domain) || 2046 WARN_ON(list_count_nodes(&group->devices) != 1)) 2047 goto out_unlock; 2048 __iommu_group_set_core_domain(group); 2049 2050 out_unlock: 2051 mutex_unlock(&group->mutex); 2052 iommu_group_put(group); 2053 } 2054 EXPORT_SYMBOL_GPL(iommu_detach_device); 2055 2056 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2057 { 2058 struct iommu_domain *domain; 2059 struct iommu_group *group; 2060 2061 group = iommu_group_get(dev); 2062 if (!group) 2063 return NULL; 2064 2065 domain = group->domain; 2066 2067 iommu_group_put(group); 2068 2069 return domain; 2070 } 2071 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2072 2073 /* 2074 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2075 * guarantees that the group and its default domain are valid and correct. 2076 */ 2077 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2078 { 2079 return dev->iommu_group->default_domain; 2080 } 2081 2082 static int __iommu_attach_group(struct iommu_domain *domain, 2083 struct iommu_group *group) 2084 { 2085 if (group->domain && group->domain != group->default_domain && 2086 group->domain != group->blocking_domain) 2087 return -EBUSY; 2088 2089 return __iommu_group_set_domain(group, domain); 2090 } 2091 2092 /** 2093 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2094 * @domain: IOMMU domain to attach 2095 * @group: IOMMU group that will be attached 2096 * 2097 * Returns 0 on success and error code on failure 2098 * 2099 * Note that EINVAL can be treated as a soft failure, indicating 2100 * that certain configuration of the domain is incompatible with 2101 * the group. In this case attaching a different domain to the 2102 * group may succeed. 
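 *
 * Editorial sketch of an owner-style flow (as used by VFIO-like users;
 * error handling elided):
 *
 *	group = iommu_group_get(dev);
 *	ret = iommu_attach_group(domain, group);
 *	...
 *	iommu_detach_group(domain, group);
 *	iommu_group_put(group);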
2103 */
2104 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2105 {
2106 int ret;
2107
2108 mutex_lock(&group->mutex);
2109 ret = __iommu_attach_group(domain, group);
2110 mutex_unlock(&group->mutex);
2111
2112 return ret;
2113 }
2114 EXPORT_SYMBOL_GPL(iommu_attach_group);
2115
2116 static int __iommu_device_set_domain(struct iommu_group *group,
2117 struct device *dev,
2118 struct iommu_domain *new_domain,
2119 unsigned int flags)
2120 {
2121 int ret;
2122
2123 if (dev->iommu->attach_deferred) {
2124 if (new_domain == group->default_domain)
2125 return 0;
2126 dev->iommu->attach_deferred = 0;
2127 }
2128
2129 ret = __iommu_attach_device(new_domain, dev);
2130 if (ret) {
2131 /*
2132 * If we have a blocking domain then try to attach that in hopes
2133 * of avoiding a UAF. Modern drivers should implement blocking
2134 * domains as global statics that cannot fail.
2135 */
2136 if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
2137 group->blocking_domain &&
2138 group->blocking_domain != new_domain)
2139 __iommu_attach_device(group->blocking_domain, dev);
2140 return ret;
2141 }
2142 return 0;
2143 }
2144
2145 /*
2146 * If 0 is returned the group's domain is new_domain. If an error is returned
2147 * then the group's domain will be set back to the existing domain unless
2148 * IOMMU_SET_DOMAIN_MUST_SUCCEED is set, in which case the error is returned
2149 * and the group's domain is left inconsistent. Failing attach with a
2150 * previously good domain is a driver bug; we try to avoid a kernel UAF here.
2151 *
2152 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
2153 * API works on domains and devices. Bridge that gap by iterating over the
2154 * devices in a group. Ideally we'd have a single device which represents the
2155 * requestor ID of the group, but we also allow IOMMU drivers to create policy
2156 * defined minimum sets, where the physical hardware may be able to distinguish
2157 * members, but we wish to group them at a higher level (ex. untrusted
2158 * multi-function PCI devices). Thus we attach each device.
2159 */
2160 static int __iommu_group_set_domain_internal(struct iommu_group *group,
2161 struct iommu_domain *new_domain,
2162 unsigned int flags)
2163 {
2164 struct group_device *last_gdev;
2165 struct group_device *gdev;
2166 int result;
2167 int ret;
2168
2169 lockdep_assert_held(&group->mutex);
2170
2171 if (group->domain == new_domain)
2172 return 0;
2173
2174 /*
2175 * New drivers should support default domains, so set_platform_dma()
2176 * op will never be called. Otherwise the NULL domain represents some
2177 * platform specific behavior.
2178 */
2179 if (!new_domain) {
2180 for_each_group_device(group, gdev) {
2181 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
2182
2183 if (!WARN_ON(!ops->set_platform_dma_ops))
2184 ops->set_platform_dma_ops(gdev->dev);
2185 }
2186 group->domain = NULL;
2187 return 0;
2188 }
2189
2190 /*
2191 * Changing the domain is done by calling attach_dev() on the new
2192 * domain. This switch does not have to be atomic and DMA can be
2193 * discarded during the transition. DMA must only be able to access
2194 * either new_domain or group->domain, never something else.
2195 */
2196 result = 0;
2197 for_each_group_device(group, gdev) {
2198 ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
2199 flags);
2200 if (ret) {
2201 result = ret;
2202 /*
2203 * Keep trying the other devices in the group. If a
2204 * driver fails attach to an otherwise good domain, and
2205 * does not support blocking domains, it should at least
2206 * drop its reference on the current domain so we don't
2207 * UAF.
2208 */
2209 if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED)
2210 continue;
2211 goto err_revert;
2212 }
2213 }
2214 group->domain = new_domain;
2215 return result;
2216
2217 err_revert:
2218 /*
2219 * This is called in error unwind paths. A well behaved driver should
2220 * always allow us to attach to a domain that was already attached.
2221 */
2222 last_gdev = gdev;
2223 for_each_group_device(group, gdev) {
2224 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
2225
2226 /*
2227 * If set_platform_dma_ops is not present a NULL domain can
2228 * happen only for first probe, in which case we leave
2229 * group->domain as NULL and let release clean everything up.
2230 */
2231 if (group->domain)
2232 WARN_ON(__iommu_device_set_domain(
2233 group, gdev->dev, group->domain,
2234 IOMMU_SET_DOMAIN_MUST_SUCCEED));
2235 else if (ops->set_platform_dma_ops)
2236 ops->set_platform_dma_ops(gdev->dev);
2237 if (gdev == last_gdev)
2238 break;
2239 }
2240 return ret;
2241 }
2242
2243 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2244 {
2245 mutex_lock(&group->mutex);
2246 __iommu_group_set_core_domain(group);
2247 mutex_unlock(&group->mutex);
2248 }
2249 EXPORT_SYMBOL_GPL(iommu_detach_group);
2250
2251 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2252 {
2253 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2254 return iova;
2255
2256 if (domain->type == IOMMU_DOMAIN_BLOCKED)
2257 return 0;
2258
2259 return domain->ops->iova_to_phys(domain, iova);
2260 }
2261 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2262
2263 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2264 phys_addr_t paddr, size_t size, size_t *count)
2265 {
2266 unsigned int pgsize_idx, pgsize_idx_next;
2267 unsigned long pgsizes;
2268 size_t offset, pgsize, pgsize_next;
2269 unsigned long addr_merge = paddr | iova;
2270
2271 /* Page sizes supported by the hardware and small enough for @size */
2272 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2273
2274 /* Constrain the page sizes further based on the maximum alignment */
2275 if (likely(addr_merge))
2276 pgsizes &= GENMASK(__ffs(addr_merge), 0);
2277
2278 /* Make sure we have at least one suitable page size */
2279 BUG_ON(!pgsizes);
2280
2281 /* Pick the biggest page size remaining */
2282 pgsize_idx = __fls(pgsizes);
2283 pgsize = BIT(pgsize_idx);
2284 if (!count)
2285 return pgsize;
2286
2287 /* Find the next biggest supported page size, if it exists */
2288 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2289 if (!pgsizes)
2290 goto out_set_count;
2291
2292 pgsize_idx_next = __ffs(pgsizes);
2293 pgsize_next = BIT(pgsize_idx_next);
2294
2295 /*
2296 * There's no point trying a bigger page size unless the virtual
2297 * and physical addresses are similarly offset within the larger page.
2298 */
2299 if ((iova ^ paddr) & (pgsize_next - 1))
2300 goto out_set_count;
2301
2302 /* Calculate the offset to the next page size alignment boundary */
2303 offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2304
2305 /*
2306 * If size is big enough to accommodate the larger page, reduce
2307 * the number of smaller pages.
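 *
 * Editorial worked example, assuming pgsize_bitmap = SZ_4K | SZ_2M:
 * for iova = paddr = 0x1ff000 and size = 0x402000, the 4K-aligned
 * addresses select pgsize = SZ_4K, pgsize_next = SZ_2M and
 * offset = 0x1000, so *count becomes 1: a single 4K page brings the
 * walk to the 2M boundary, letting the next call map 2M blocks.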
2308 */ 2309 if (offset + pgsize_next <= size) 2310 size = offset; 2311 2312 out_set_count: 2313 *count = size >> pgsize_idx; 2314 return pgsize; 2315 } 2316 2317 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2318 phys_addr_t paddr, size_t size, int prot, 2319 gfp_t gfp, size_t *mapped) 2320 { 2321 const struct iommu_domain_ops *ops = domain->ops; 2322 size_t pgsize, count; 2323 int ret; 2324 2325 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2326 2327 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2328 iova, &paddr, pgsize, count); 2329 2330 if (ops->map_pages) { 2331 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2332 gfp, mapped); 2333 } else { 2334 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2335 *mapped = ret ? 0 : pgsize; 2336 } 2337 2338 return ret; 2339 } 2340 2341 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2342 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2343 { 2344 const struct iommu_domain_ops *ops = domain->ops; 2345 unsigned long orig_iova = iova; 2346 unsigned int min_pagesz; 2347 size_t orig_size = size; 2348 phys_addr_t orig_paddr = paddr; 2349 int ret = 0; 2350 2351 if (unlikely(!(ops->map || ops->map_pages) || 2352 domain->pgsize_bitmap == 0UL)) 2353 return -ENODEV; 2354 2355 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2356 return -EINVAL; 2357 2358 /* find out the minimum page size supported */ 2359 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2360 2361 /* 2362 * both the virtual address and the physical one, as well as 2363 * the size of the mapping, must be aligned (at least) to the 2364 * size of the smallest page supported by the hardware 2365 */ 2366 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2367 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2368 iova, &paddr, size, min_pagesz); 2369 return -EINVAL; 2370 } 2371 2372 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2373 2374 while (size) { 2375 size_t mapped = 0; 2376 2377 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2378 &mapped); 2379 /* 2380 * Some pages may have been mapped, even if an error occurred, 2381 * so we should account for those so they can be unmapped. 
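 *
 * (Editorial example: if 6MB were requested and the driver mapped
 * 4MB before failing, 'mapped' covers that 4MB, the loop breaks with
 * ret set, and the unroll below unmaps orig_size - size = 4MB.)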
2382 */ 2383 size -= mapped; 2384 2385 if (ret) 2386 break; 2387 2388 iova += mapped; 2389 paddr += mapped; 2390 } 2391 2392 /* unroll mapping in case something went wrong */ 2393 if (ret) 2394 iommu_unmap(domain, orig_iova, orig_size - size); 2395 else 2396 trace_map(orig_iova, orig_paddr, orig_size); 2397 2398 return ret; 2399 } 2400 2401 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2402 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2403 { 2404 const struct iommu_domain_ops *ops = domain->ops; 2405 int ret; 2406 2407 might_sleep_if(gfpflags_allow_blocking(gfp)); 2408 2409 /* Discourage passing strange GFP flags */ 2410 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2411 __GFP_HIGHMEM))) 2412 return -EINVAL; 2413 2414 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2415 if (ret == 0 && ops->iotlb_sync_map) 2416 ops->iotlb_sync_map(domain, iova, size); 2417 2418 return ret; 2419 } 2420 EXPORT_SYMBOL_GPL(iommu_map); 2421 2422 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2423 unsigned long iova, size_t size, 2424 struct iommu_iotlb_gather *iotlb_gather) 2425 { 2426 const struct iommu_domain_ops *ops = domain->ops; 2427 size_t pgsize, count; 2428 2429 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2430 return ops->unmap_pages ? 2431 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2432 ops->unmap(domain, iova, pgsize, iotlb_gather); 2433 } 2434 2435 static size_t __iommu_unmap(struct iommu_domain *domain, 2436 unsigned long iova, size_t size, 2437 struct iommu_iotlb_gather *iotlb_gather) 2438 { 2439 const struct iommu_domain_ops *ops = domain->ops; 2440 size_t unmapped_page, unmapped = 0; 2441 unsigned long orig_iova = iova; 2442 unsigned int min_pagesz; 2443 2444 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2445 domain->pgsize_bitmap == 0UL)) 2446 return 0; 2447 2448 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2449 return 0; 2450 2451 /* find out the minimum page size supported */ 2452 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2453 2454 /* 2455 * The virtual address, as well as the size of the mapping, must be 2456 * aligned (at least) to the size of the smallest page supported 2457 * by the hardware 2458 */ 2459 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2460 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2461 iova, size, min_pagesz); 2462 return 0; 2463 } 2464 2465 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2466 2467 /* 2468 * Keep iterating until we either unmap 'size' bytes (or more) 2469 * or we hit an area that isn't mapped. 
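 *
 * (Editorial example: a region mapped as one 2MB block plus one
 * trailing 4KB page is torn down in two iterations, iommu_pgsize()
 * sizing each call to the block present at the current iova.)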
2470 */
2471 while (unmapped < size) {
2472 unmapped_page = __iommu_unmap_pages(domain, iova,
2473 size - unmapped,
2474 iotlb_gather);
2475 if (!unmapped_page)
2476 break;
2477
2478 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2479 iova, unmapped_page);
2480
2481 iova += unmapped_page;
2482 unmapped += unmapped_page;
2483 }
2484
2485 trace_unmap(orig_iova, size, unmapped);
2486 return unmapped;
2487 }
2488
2489 size_t iommu_unmap(struct iommu_domain *domain,
2490 unsigned long iova, size_t size)
2491 {
2492 struct iommu_iotlb_gather iotlb_gather;
2493 size_t ret;
2494
2495 iommu_iotlb_gather_init(&iotlb_gather);
2496 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2497 iommu_iotlb_sync(domain, &iotlb_gather);
2498
2499 return ret;
2500 }
2501 EXPORT_SYMBOL_GPL(iommu_unmap);
2502
2503 size_t iommu_unmap_fast(struct iommu_domain *domain,
2504 unsigned long iova, size_t size,
2505 struct iommu_iotlb_gather *iotlb_gather)
2506 {
2507 return __iommu_unmap(domain, iova, size, iotlb_gather);
2508 }
2509 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2510
2511 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2512 struct scatterlist *sg, unsigned int nents, int prot,
2513 gfp_t gfp)
2514 {
2515 const struct iommu_domain_ops *ops = domain->ops;
2516 size_t len = 0, mapped = 0;
2517 phys_addr_t start;
2518 unsigned int i = 0;
2519 int ret;
2520
2521 might_sleep_if(gfpflags_allow_blocking(gfp));
2522
2523 /* Discourage passing strange GFP flags */
2524 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
2525 __GFP_HIGHMEM)))
2526 return -EINVAL;
2527
2528 while (i <= nents) {
2529 phys_addr_t s_phys = sg_phys(sg);
2530
2531 if (len && s_phys != start + len) {
2532 ret = __iommu_map(domain, iova + mapped, start,
2533 len, prot, gfp);
2534
2535 if (ret)
2536 goto out_err;
2537
2538 mapped += len;
2539 len = 0;
2540 }
2541
2542 if (sg_dma_is_bus_address(sg))
2543 goto next;
2544
2545 if (len) {
2546 len += sg->length;
2547 } else {
2548 len = sg->length;
2549 start = s_phys;
2550 }
2551
2552 next:
2553 if (++i < nents)
2554 sg = sg_next(sg);
2555 }
2556
2557 if (ops->iotlb_sync_map)
2558 ops->iotlb_sync_map(domain, iova, mapped);
2559 return mapped;
2560
2561 out_err:
2562 /* undo mappings already done */
2563 iommu_unmap(domain, iova, mapped);
2564
2565 return ret;
2566 }
2567 EXPORT_SYMBOL_GPL(iommu_map_sg);
2568
2569 /**
2570 * report_iommu_fault() - report an IOMMU fault to the IOMMU framework
2571 * @domain: the iommu domain where the fault has happened
2572 * @dev: the device where the fault has happened
2573 * @iova: the faulting address
2574 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2575 *
2576 * This function should be called by the low-level IOMMU implementations
2577 * whenever IOMMU faults happen, to allow high-level users, that are
2578 * interested in such events, to know about them.
2579 *
2580 * This event may be useful for several possible use cases:
2581 * - mere logging of the event
2582 * - dynamic TLB/PTE loading
2583 * - restarting the faulting device, if required
2584 *
2585 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2586 * PTE/TLB loading will one day be supported, implementations will be able
2587 * to tell whether it succeeded or not according to this return value).
2588 *
2589 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2590 * (though fault handlers can also return -ENOSYS, in case they want to
2591 * elicit the default behavior of the IOMMU drivers).
2592 */
2593 int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2594 unsigned long iova, int flags)
2595 {
2596 int ret = -ENOSYS;
2597
2598 /*
2599 * if upper layers showed interest and installed a fault handler,
2600 * invoke it.
2601 */
2602 if (domain->handler)
2603 ret = domain->handler(domain, dev, iova, flags,
2604 domain->handler_token);
2605
2606 trace_io_page_fault(dev, iova, flags);
2607 return ret;
2608 }
2609 EXPORT_SYMBOL_GPL(report_iommu_fault);
2610
2611 static int __init iommu_init(void)
2612 {
2613 iommu_group_kset = kset_create_and_add("iommu_groups",
2614 NULL, kernel_kobj);
2615 BUG_ON(!iommu_group_kset);
2616
2617 iommu_debugfs_setup();
2618
2619 return 0;
2620 }
2621 core_initcall(iommu_init);
2622
2623 int iommu_enable_nesting(struct iommu_domain *domain)
2624 {
2625 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2626 return -EINVAL;
2627 if (!domain->ops->enable_nesting)
2628 return -EINVAL;
2629 return domain->ops->enable_nesting(domain);
2630 }
2631 EXPORT_SYMBOL_GPL(iommu_enable_nesting);
2632
2633 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2634 unsigned long quirk)
2635 {
2636 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2637 return -EINVAL;
2638 if (!domain->ops->set_pgtable_quirks)
2639 return -EINVAL;
2640 return domain->ops->set_pgtable_quirks(domain, quirk);
2641 }
2642 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
2643
2644 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2645 {
2646 const struct iommu_ops *ops = dev_iommu_ops(dev);
2647
2648 if (ops->get_resv_regions)
2649 ops->get_resv_regions(dev, list);
2650 }
2651
2652 /**
2653 * iommu_put_resv_regions - release reserved regions
2654 * @dev: device for which to free reserved regions
2655 * @list: reserved region list for device
2656 *
2657 * This releases a reserved region list acquired by iommu_get_resv_regions().
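 *
 * Editorial usage sketch (the printing is illustrative only):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		dev_info(dev, "resv start %pa length %zu\n",
 *			 &region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);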
2658 */
2659 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2660 {
2661 struct iommu_resv_region *entry, *next;
2662
2663 list_for_each_entry_safe(entry, next, list, list) {
2664 if (entry->free)
2665 entry->free(dev, entry);
2666 else
2667 kfree(entry);
2668 }
2669 }
2670 EXPORT_SYMBOL(iommu_put_resv_regions);
2671
2672 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2673 size_t length, int prot,
2674 enum iommu_resv_type type,
2675 gfp_t gfp)
2676 {
2677 struct iommu_resv_region *region;
2678
2679 region = kzalloc(sizeof(*region), gfp);
2680 if (!region)
2681 return NULL;
2682
2683 INIT_LIST_HEAD(&region->list);
2684 region->start = start;
2685 region->length = length;
2686 region->prot = prot;
2687 region->type = type;
2688 return region;
2689 }
2690 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
2691
2692 void iommu_set_default_passthrough(bool cmd_line)
2693 {
2694 if (cmd_line)
2695 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2696 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2697 }
2698
2699 void iommu_set_default_translated(bool cmd_line)
2700 {
2701 if (cmd_line)
2702 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2703 iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2704 }
2705
2706 bool iommu_default_passthrough(void)
2707 {
2708 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2709 }
2710 EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2711
2712 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2713 {
2714 const struct iommu_ops *ops = NULL;
2715 struct iommu_device *iommu;
2716
2717 spin_lock(&iommu_device_lock);
2718 list_for_each_entry(iommu, &iommu_device_list, list)
2719 if (iommu->fwnode == fwnode) {
2720 ops = iommu->ops;
2721 break;
2722 }
2723 spin_unlock(&iommu_device_lock);
2724 return ops;
2725 }
2726
2727 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2728 const struct iommu_ops *ops)
2729 {
2730 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2731
2732 if (fwspec)
2733 return ops == fwspec->ops ? 0 : -EINVAL;
2734
2735 if (!dev_iommu_get(dev))
2736 return -ENOMEM;
2737
2738 /* Preallocate for the overwhelmingly common case of 1 ID */
2739 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2740 if (!fwspec)
2741 return -ENOMEM;
2742
2743 of_node_get(to_of_node(iommu_fwnode));
2744 fwspec->iommu_fwnode = iommu_fwnode;
2745 fwspec->ops = ops;
2746 dev_iommu_fwspec_set(dev, fwspec);
2747 return 0;
2748 }
2749 EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2750
2751 void iommu_fwspec_free(struct device *dev)
2752 {
2753 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2754
2755 if (fwspec) {
2756 fwnode_handle_put(fwspec->iommu_fwnode);
2757 kfree(fwspec);
2758 dev_iommu_fwspec_set(dev, NULL);
2759 }
2760 }
2761 EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2762
2763 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2764 {
2765 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2766 int i, new_num;
2767
2768 if (!fwspec)
2769 return -EINVAL;
2770
2771 new_num = fwspec->num_ids + num_ids;
2772 if (new_num > 1) {
2773 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2774 GFP_KERNEL);
2775 if (!fwspec)
2776 return -ENOMEM;
2777
2778 dev_iommu_fwspec_set(dev, fwspec);
2779 }
2780
2781 for (i = 0; i < num_ids; i++)
2782 fwspec->ids[fwspec->num_ids + i] = ids[i];
2783
2784 fwspec->num_ids = new_num;
2785 return 0;
2786 }
2787 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
2788
2789 /*
2790 * Per device IOMMU features.
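 *
 * Editorial sketch: a driver enabling SVA support (feature names come from
 * enum iommu_dev_features; error handling elided):
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF))
 *		return -ENODEV;
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return -ENODEV;
 *	}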
2791 */ 2792 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2793 { 2794 if (dev->iommu && dev->iommu->iommu_dev) { 2795 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2796 2797 if (ops->dev_enable_feat) 2798 return ops->dev_enable_feat(dev, feat); 2799 } 2800 2801 return -ENODEV; 2802 } 2803 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2804 2805 /* 2806 * The device drivers should do the necessary cleanups before calling this. 2807 */ 2808 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2809 { 2810 if (dev->iommu && dev->iommu->iommu_dev) { 2811 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2812 2813 if (ops->dev_disable_feat) 2814 return ops->dev_disable_feat(dev, feat); 2815 } 2816 2817 return -EBUSY; 2818 } 2819 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2820 2821 /** 2822 * iommu_setup_default_domain - Set the default_domain for the group 2823 * @group: Group to change 2824 * @target_type: Domain type to set as the default_domain 2825 * 2826 * Allocate a default domain and set it as the current domain on the group. If 2827 * the group already has a default domain it will be changed to the target_type. 2828 * When target_type is 0 the default domain is selected based on driver and 2829 * system preferences. 2830 */ 2831 static int iommu_setup_default_domain(struct iommu_group *group, 2832 int target_type) 2833 { 2834 struct iommu_domain *old_dom = group->default_domain; 2835 struct group_device *gdev; 2836 struct iommu_domain *dom; 2837 bool direct_failed; 2838 int req_type; 2839 int ret; 2840 2841 lockdep_assert_held(&group->mutex); 2842 2843 req_type = iommu_get_default_domain_type(group, target_type); 2844 if (req_type < 0) 2845 return -EINVAL; 2846 2847 /* 2848 * There are still some drivers which don't support default domains, so 2849 * we ignore the failure and leave group->default_domain NULL. 2850 * 2851 * We assume that the iommu driver starts up the device in 2852 * 'set_platform_dma_ops' mode if it does not support default domains. 2853 */ 2854 dom = iommu_group_alloc_default_domain(group, req_type); 2855 if (!dom) { 2856 /* Once in default_domain mode we never leave */ 2857 if (group->default_domain) 2858 return -ENODEV; 2859 group->default_domain = NULL; 2860 return 0; 2861 } 2862 2863 if (group->default_domain == dom) 2864 return 0; 2865 2866 /* 2867 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 2868 * mapped before their device is attached, in order to guarantee 2869 * continuity with any FW activity 2870 */ 2871 direct_failed = false; 2872 for_each_group_device(group, gdev) { 2873 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 2874 direct_failed = true; 2875 dev_warn_once( 2876 gdev->dev->iommu->iommu_dev->dev, 2877 "IOMMU driver was not able to establish FW requested direct mapping."); 2878 } 2879 } 2880 2881 /* We must set default_domain early for __iommu_device_set_domain */ 2882 group->default_domain = dom; 2883 if (!group->domain) { 2884 /* 2885 * Drivers are not allowed to fail the first domain attach. 2886 * The only way to recover from this is to fail attaching the 2887 * iommu driver and call ops->release_device. Put the domain 2888 * in group->default_domain so it is freed after. 
2889 */
2890 ret = __iommu_group_set_domain_internal(
2891 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
2892 if (WARN_ON(ret))
2893 goto out_free;
2894 } else {
2895 ret = __iommu_group_set_domain(group, dom);
2896 if (ret) {
2897 iommu_domain_free(dom);
2898 group->default_domain = old_dom;
2899 return ret;
2900 }
2901 }
2902
2903 /*
2904 * Drivers are supposed to allow mappings to be installed in a domain
2905 * before device attachment, but some don't. Hack around this defect by
2906 * trying again after attaching. If this happens it means the device
2907 * will not continuously have the IOMMU_RESV_DIRECT map.
2908 */
2909 if (direct_failed) {
2910 for_each_group_device(group, gdev) {
2911 ret = iommu_create_device_direct_mappings(dom, gdev->dev);
2912 if (ret)
2913 goto err_restore;
2914 }
2915 }
2916 return 0;
2917 err_restore:
2918 if (old_dom) {
2919 __iommu_group_set_domain_internal(
2920 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
2921 iommu_domain_free(dom);
2922 old_dom = NULL;
2923 }
2924 out_free:
2925 if (old_dom)
2926 iommu_domain_free(old_dom);
2927 return ret;
2928 }
2929
2930 /*
2931 * Changing the default domain through sysfs requires the users to unbind the
2932 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
2933 * transition. Return failure if this isn't met.
2934 *
2935 * We need to consider the race between this and the device release path.
2936 * group->mutex is used here to guarantee that the device release path
2937 * will not be entered at the same time.
2938 */
2939 static ssize_t iommu_group_store_type(struct iommu_group *group,
2940 const char *buf, size_t count)
2941 {
2942 struct group_device *gdev;
2943 int ret, req_type;
2944
2945 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2946 return -EACCES;
2947
2948 if (WARN_ON(!group) || !group->default_domain)
2949 return -EINVAL;
2950
2951 if (sysfs_streq(buf, "identity"))
2952 req_type = IOMMU_DOMAIN_IDENTITY;
2953 else if (sysfs_streq(buf, "DMA"))
2954 req_type = IOMMU_DOMAIN_DMA;
2955 else if (sysfs_streq(buf, "DMA-FQ"))
2956 req_type = IOMMU_DOMAIN_DMA_FQ;
2957 else if (sysfs_streq(buf, "auto"))
2958 req_type = 0;
2959 else
2960 return -EINVAL;
2961
2962 mutex_lock(&group->mutex);
2963 /* We can bring up a flush queue without tearing down the domain. */
2964 if (req_type == IOMMU_DOMAIN_DMA_FQ &&
2965 group->default_domain->type == IOMMU_DOMAIN_DMA) {
2966 ret = iommu_dma_init_fq(group->default_domain);
2967 if (ret)
2968 goto out_unlock;
2969
2970 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
2971 ret = count;
2972 goto out_unlock;
2973 }
2974
2975 /* Otherwise, ensure that device exists and no driver is bound. */
2976 if (list_empty(&group->devices) || group->owner_cnt) {
2977 ret = -EPERM;
2978 goto out_unlock;
2979 }
2980
2981 ret = iommu_setup_default_domain(group, req_type);
2982 if (ret)
2983 goto out_unlock;
2984
2985 /*
2986 * Release the mutex here because ops->probe_finalize() call-back of
2987 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
2988 * in-turn might call back into IOMMU core code, where it tries to take
2989 * group->mutex, resulting in a deadlock.
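 *
 * (Editorial note: this store is reached from user space through sysfs,
 * e.g. "echo DMA-FQ > /sys/kernel/iommu_groups/<id>/type", with the
 * group's drivers unbound except for the DMA -> DMA-FQ case above.)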
2990 */
2991 mutex_unlock(&group->mutex);
2992
2993 /* Make sure dma_ops is appropriately set */
2994 for_each_group_device(group, gdev)
2995 iommu_group_do_probe_finalize(gdev->dev);
2996 return count;
2997
2998 out_unlock:
2999 mutex_unlock(&group->mutex);
3000 return ret ?: count;
3001 }
3002
3003 static bool iommu_is_default_domain(struct iommu_group *group)
3004 {
3005 if (group->domain == group->default_domain)
3006 return true;
3007
3008 /*
3009 * If the default domain was set to identity and it is still an identity
3010 * domain then we consider this a pass. This happens because of
3011 * amd_iommu_init_device() replacing the default identity domain with an
3012 * identity domain that has a different configuration for AMDGPU.
3013 */
3014 if (group->default_domain &&
3015 group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
3016 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
3017 return true;
3018 return false;
3019 }
3020
3021 /**
3022 * iommu_device_use_default_domain() - Device driver wants to handle device
3023 * DMA through the kernel DMA API.
3024 * @dev: The device.
3025 *
3026 * The device driver about to bind @dev wants to do DMA through the kernel
3027 * DMA API. Return 0 if it is allowed, otherwise an error.
3028 */
3029 int iommu_device_use_default_domain(struct device *dev)
3030 {
3031 struct iommu_group *group = iommu_group_get(dev);
3032 int ret = 0;
3033
3034 if (!group)
3035 return 0;
3036
3037 mutex_lock(&group->mutex);
3038 if (group->owner_cnt) {
3039 if (group->owner || !iommu_is_default_domain(group) ||
3040 !xa_empty(&group->pasid_array)) {
3041 ret = -EBUSY;
3042 goto unlock_out;
3043 }
3044 }
3045
3046 group->owner_cnt++;
3047
3048 unlock_out:
3049 mutex_unlock(&group->mutex);
3050 iommu_group_put(group);
3051
3052 return ret;
3053 }
3054
3055 /**
3056 * iommu_device_unuse_default_domain() - Device driver stops handling device
3057 * DMA through the kernel DMA API.
3058 * @dev: The device.
3059 *
3060 * The device driver doesn't want to do DMA through kernel DMA API anymore.
3061 * It must be called after iommu_device_use_default_domain().
3062 */
3063 void iommu_device_unuse_default_domain(struct device *dev)
3064 {
3065 struct iommu_group *group = iommu_group_get(dev);
3066
3067 if (!group)
3068 return;
3069
3070 mutex_lock(&group->mutex);
3071 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
3072 group->owner_cnt--;
3073
3074 mutex_unlock(&group->mutex);
3075 iommu_group_put(group);
3076 }
3077
3078 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
3079 {
3080 struct group_device *dev =
3081 list_first_entry(&group->devices, struct group_device, list);
3082
3083 if (group->blocking_domain)
3084 return 0;
3085
3086 group->blocking_domain =
3087 __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
3088 if (!group->blocking_domain) {
3089 /*
3090 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
3091 * create an empty domain instead.
3092 */
3093 group->blocking_domain = __iommu_domain_alloc(
3094 dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
3095 if (!group->blocking_domain)
3096 return -EINVAL;
3097 }
3098 return 0;
3099 }
3100
3101 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
3102 {
3103 int ret;
3104
3105 if ((group->domain && group->domain != group->default_domain) ||
3106 !xa_empty(&group->pasid_array))
3107 return -EBUSY;
3108
3109 ret = __iommu_group_alloc_blocking_domain(group);
3110 if (ret)
3111 return ret;
3112 ret = __iommu_group_set_domain(group, group->blocking_domain);
3113 if (ret)
3114 return ret;
3115
3116 group->owner = owner;
3117 group->owner_cnt++;
3118 return 0;
3119 }
3120
3121 /**
3122 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
3123 * @group: The group.
3124 * @owner: Caller specified pointer. Used for exclusive ownership.
3125 *
3126 * This is to support backward compatibility for vfio, which manages DMA
3127 * ownership at the iommu_group level. New invocations of this interface
3128 * should be prohibited. Only a single owner may exist for a group.
3129 */
3130 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
3131 {
3132 int ret = 0;
3133
3134 if (WARN_ON(!owner))
3135 return -EINVAL;
3136
3137 mutex_lock(&group->mutex);
3138 if (group->owner_cnt) {
3139 ret = -EPERM;
3140 goto unlock_out;
3141 }
3142
3143 ret = __iommu_take_dma_ownership(group, owner);
3144 unlock_out:
3145 mutex_unlock(&group->mutex);
3146
3147 return ret;
3148 }
3149 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
3150
3151 /**
3152 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
3153 * @dev: The device.
3154 * @owner: Caller specified pointer. Used for exclusive ownership.
3155 *
3156 * Claim the DMA ownership of a device. Multiple devices in the same group may
3157 * concurrently claim ownership if they present the same owner value. Returns 0
3158 * on success and error code on failure.
3159 */
3160 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
3161 {
3162 struct iommu_group *group;
3163 int ret = 0;
3164
3165 if (WARN_ON(!owner))
3166 return -EINVAL;
3167
3168 group = iommu_group_get(dev);
3169 if (!group)
3170 return -ENODEV;
3171
3172 mutex_lock(&group->mutex);
3173 if (group->owner_cnt) {
3174 if (group->owner != owner) {
3175 ret = -EPERM;
3176 goto unlock_out;
3177 }
3178 group->owner_cnt++;
3179 goto unlock_out;
3180 }
3181
3182 ret = __iommu_take_dma_ownership(group, owner);
3183 unlock_out:
3184 mutex_unlock(&group->mutex);
3185 iommu_group_put(group);
3186
3187 return ret;
3188 }
3189 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
3190
3191 static void __iommu_release_dma_ownership(struct iommu_group *group)
3192 {
3193 if (WARN_ON(!group->owner_cnt || !group->owner ||
3194 !xa_empty(&group->pasid_array)))
3195 return;
3196
3197 group->owner_cnt = 0;
3198 group->owner = NULL;
3199 __iommu_group_set_domain_nofail(group, group->default_domain);
3200 }
3201
3202 /**
3203 * iommu_group_release_dma_owner() - Release DMA ownership of a group
3204 * @group: The group.
3205 *
3206 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
3207 */
3208 void iommu_group_release_dma_owner(struct iommu_group *group)
3209 {
3210 mutex_lock(&group->mutex);
3211 __iommu_release_dma_ownership(group);
3212 mutex_unlock(&group->mutex);
3213 }
3214 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
3215
3216 /**
3217 * iommu_device_release_dma_owner() - Release DMA ownership of a device
3218 * @dev: The device.
3219 * 3220 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3221 */ 3222 void iommu_device_release_dma_owner(struct device *dev) 3223 { 3224 struct iommu_group *group = iommu_group_get(dev); 3225 3226 mutex_lock(&group->mutex); 3227 if (group->owner_cnt > 1) 3228 group->owner_cnt--; 3229 else 3230 __iommu_release_dma_ownership(group); 3231 mutex_unlock(&group->mutex); 3232 iommu_group_put(group); 3233 } 3234 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3235 3236 /** 3237 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3238 * @group: The group. 3239 * 3240 * This provides status query on a given group. It is racy and only for 3241 * non-binding status reporting. 3242 */ 3243 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3244 { 3245 unsigned int user; 3246 3247 mutex_lock(&group->mutex); 3248 user = group->owner_cnt; 3249 mutex_unlock(&group->mutex); 3250 3251 return user; 3252 } 3253 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3254 3255 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3256 struct iommu_group *group, ioasid_t pasid) 3257 { 3258 struct group_device *device; 3259 int ret = 0; 3260 3261 for_each_group_device(group, device) { 3262 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3263 if (ret) 3264 break; 3265 } 3266 3267 return ret; 3268 } 3269 3270 static void __iommu_remove_group_pasid(struct iommu_group *group, 3271 ioasid_t pasid) 3272 { 3273 struct group_device *device; 3274 const struct iommu_ops *ops; 3275 3276 for_each_group_device(group, device) { 3277 ops = dev_iommu_ops(device->dev); 3278 ops->remove_dev_pasid(device->dev, pasid); 3279 } 3280 } 3281 3282 /* 3283 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3284 * @domain: the iommu domain. 3285 * @dev: the attached device. 3286 * @pasid: the pasid of the device. 3287 * 3288 * Return: 0 on success, or an error. 3289 */ 3290 int iommu_attach_device_pasid(struct iommu_domain *domain, 3291 struct device *dev, ioasid_t pasid) 3292 { 3293 struct iommu_group *group; 3294 void *curr; 3295 int ret; 3296 3297 if (!domain->ops->set_dev_pasid) 3298 return -EOPNOTSUPP; 3299 3300 group = iommu_group_get(dev); 3301 if (!group) 3302 return -ENODEV; 3303 3304 mutex_lock(&group->mutex); 3305 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3306 if (curr) { 3307 ret = xa_err(curr) ? : -EBUSY; 3308 goto out_unlock; 3309 } 3310 3311 ret = __iommu_set_group_pasid(domain, group, pasid); 3312 if (ret) { 3313 __iommu_remove_group_pasid(group, pasid); 3314 xa_erase(&group->pasid_array, pasid); 3315 } 3316 out_unlock: 3317 mutex_unlock(&group->mutex); 3318 iommu_group_put(group); 3319 3320 return ret; 3321 } 3322 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3323 3324 /* 3325 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3326 * @domain: the iommu domain. 3327 * @dev: the attached device. 3328 * @pasid: the pasid of the device. 3329 * 3330 * The @domain must have been attached to @pasid of the @dev with 3331 * iommu_attach_device_pasid(). 
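 *
 * Editorial sketch of the expected pairing (PASID allocation elided):
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_detach_device_pasid(domain, dev, pasid);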
3332 */
3333 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
3334 ioasid_t pasid)
3335 {
3336 struct iommu_group *group = iommu_group_get(dev);
3337
3338 mutex_lock(&group->mutex);
3339 __iommu_remove_group_pasid(group, pasid);
3340 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
3341 mutex_unlock(&group->mutex);
3342
3343 iommu_group_put(group);
3344 }
3345 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
3346
3347 /*
3348 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
3349 * @dev: the queried device
3350 * @pasid: the pasid of the device
3351 * @type: matched domain type, 0 for any match
3352 *
3353 * This is a variant of iommu_get_domain_for_dev(). It returns the existing
3354 * domain attached to pasid of a device. Callers must hold a lock around this
3355 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of
3356 * this type is being manipulated. This API does not internally resolve races
3357 * with attach/detach.
3358 *
3359 * Return: attached domain on success, NULL otherwise.
3360 */
3361 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
3362 ioasid_t pasid,
3363 unsigned int type)
3364 {
3365 struct iommu_domain *domain;
3366 struct iommu_group *group;
3367
3368 group = iommu_group_get(dev);
3369 if (!group)
3370 return NULL;
3371
3372 xa_lock(&group->pasid_array);
3373 domain = xa_load(&group->pasid_array, pasid);
3374 if (type && domain && domain->type != type)
3375 domain = ERR_PTR(-EBUSY);
3376 xa_unlock(&group->pasid_array);
3377 iommu_group_put(group);
3378
3379 return domain;
3380 }
3381 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
3382
3383 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
3384 struct mm_struct *mm)
3385 {
3386 const struct iommu_ops *ops = dev_iommu_ops(dev);
3387 struct iommu_domain *domain;
3388
3389 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
3390 if (!domain)
3391 return NULL;
3392
3393 domain->type = IOMMU_DOMAIN_SVA;
3394 mmgrab(mm);
3395 domain->mm = mm;
3396 domain->iopf_handler = iommu_sva_handle_iopf;
3397 domain->fault_data = mm;
3398
3399 return domain;
3400 }
3401
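/*
 * Editorial usage sketch for iommu_sva_domain_alloc(); in-tree callers go
 * through iommu_sva_bind_device(), which also manages the PASID (error
 * handling elided, 'pasid' assumed already allocated):
 *
 *	domain = iommu_sva_domain_alloc(dev, current->mm);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 */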