// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);

enum {
	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
#ifdef CONFIG_CDX_BUS
	&cdx_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				" (set via kernel command line)" : "");

214 " (set via kernel command line)" : ""); 215 216 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 217 if (!nb) 218 return -ENOMEM; 219 220 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 221 nb[i].notifier_call = iommu_bus_notifier; 222 bus_register_notifier(iommu_buses[i], &nb[i]); 223 } 224 225 return 0; 226 } 227 subsys_initcall(iommu_subsys_init); 228 229 static int remove_iommu_group(struct device *dev, void *data) 230 { 231 if (dev->iommu && dev->iommu->iommu_dev == data) 232 iommu_release_device(dev); 233 234 return 0; 235 } 236 237 /** 238 * iommu_device_register() - Register an IOMMU hardware instance 239 * @iommu: IOMMU handle for the instance 240 * @ops: IOMMU ops to associate with the instance 241 * @hwdev: (optional) actual instance device, used for fwnode lookup 242 * 243 * Return: 0 on success, or an error. 244 */ 245 int iommu_device_register(struct iommu_device *iommu, 246 const struct iommu_ops *ops, struct device *hwdev) 247 { 248 int err = 0; 249 250 /* We need to be able to take module references appropriately */ 251 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 252 return -EINVAL; 253 /* 254 * Temporarily enforce global restriction to a single driver. This was 255 * already the de-facto behaviour, since any possible combination of 256 * existing drivers would compete for at least the PCI or platform bus. 257 */ 258 if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops) 259 return -EBUSY; 260 261 iommu->ops = ops; 262 if (hwdev) 263 iommu->fwnode = dev_fwnode(hwdev); 264 265 spin_lock(&iommu_device_lock); 266 list_add_tail(&iommu->list, &iommu_device_list); 267 spin_unlock(&iommu_device_lock); 268 269 for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) { 270 iommu_buses[i]->iommu_ops = ops; 271 err = bus_iommu_probe(iommu_buses[i]); 272 } 273 if (err) 274 iommu_device_unregister(iommu); 275 return err; 276 } 277 EXPORT_SYMBOL_GPL(iommu_device_register); 278 279 void iommu_device_unregister(struct iommu_device *iommu) 280 { 281 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) 282 bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); 283 284 spin_lock(&iommu_device_lock); 285 list_del(&iommu->list); 286 spin_unlock(&iommu_device_lock); 287 } 288 EXPORT_SYMBOL_GPL(iommu_device_unregister); 289 290 static struct dev_iommu *dev_iommu_get(struct device *dev) 291 { 292 struct dev_iommu *param = dev->iommu; 293 294 if (param) 295 return param; 296 297 param = kzalloc(sizeof(*param), GFP_KERNEL); 298 if (!param) 299 return NULL; 300 301 mutex_init(¶m->lock); 302 dev->iommu = param; 303 return param; 304 } 305 306 static void dev_iommu_free(struct device *dev) 307 { 308 struct dev_iommu *param = dev->iommu; 309 310 dev->iommu = NULL; 311 if (param->fwspec) { 312 fwnode_handle_put(param->fwspec->iommu_fwnode); 313 kfree(param->fwspec); 314 } 315 kfree(param); 316 } 317 318 static u32 dev_iommu_get_max_pasids(struct device *dev) 319 { 320 u32 max_pasids = 0, bits = 0; 321 int ret; 322 323 if (dev_is_pci(dev)) { 324 ret = pci_max_pasids(to_pci_dev(dev)); 325 if (ret > 0) 326 max_pasids = ret; 327 } else { 328 ret = device_property_read_u32(dev, "pasid-num-bits", &bits); 329 if (!ret) 330 max_pasids = 1UL << bits; 331 } 332 333 return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids); 334 } 335 336 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 337 { 338 const struct iommu_ops *ops = dev->bus->iommu_ops; 339 struct iommu_device *iommu_dev; 340 struct iommu_group 
	static DEFINE_MUTEX(iommu_probe_device_lock);
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	mutex_lock(&iommu_probe_device_lock);

	/* Device is probed already if in a group */
	if (dev->iommu_group) {
		ret = 0;
		goto out_unlock;
	}

	if (!dev_iommu_get(dev)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}

	mutex_lock(&group->mutex);
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	mutex_unlock(&iommu_probe_device_lock);
	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

out_unlock:
	mutex_unlock(&iommu_probe_device_lock);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	mutex_lock(&group->mutex);

	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);

	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_unlock;
	} else if (!group->default_domain) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_unlock;
	}

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
err_release:
	iommu_release_device(dev);

err_out:
	return ret;

}

/*
 * Remove a device from a group's device list and return the group device
 * if successful.
 */
static struct group_device *
__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *device;

	lockdep_assert_held(&group->mutex);
	for_each_group_device(group, device) {
		if (device->dev == dev) {
			list_del(&device->list);
			return device;
		}
	}

	return NULL;
}

/*
 * Release a device from its group and decrement the iommu group reference
 * count.
 */
static void __iommu_group_release_device(struct iommu_group *group,
					 struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(grp_dev->name);
	kfree(grp_dev);
	dev->iommu_group = NULL;
	iommu_group_put(group);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;

	if (!dev->iommu || !group)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);

	/*
	 * If the group has become empty then ownership must have been released,
	 * and the current domain must be set back to NULL or the default
	 * domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
	 *
	 * The IOMMU driver must set the device to either an identity or
	 * blocking translation and stop using any domain pointer, as it is
	 * going to be freed.
	 */
	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);

	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sysfs_emit(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	int offset = 0;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return offset;
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static const struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

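/*
 * Illustrative sketch (hypothetical driver code, not part of this file's
 * API): a bus- or driver-specific grouping path would typically combine
 * the helpers above roughly as follows. "my_data", "my_data_release" and
 * the group name are placeholders.
 *
 *	struct iommu_group *grp = iommu_group_alloc();
 *
 *	if (IS_ERR(grp))
 *		return grp;
 *	iommu_group_set_iommudata(grp, my_data, my_data_release);
 *	iommu_group_set_name(grp, "my-iommu-group");
 *	return grp;
 */
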
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}

	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);
	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);
	mutex_unlock(&group->mutex);

	if (device)
		__iommu_group_release_device(group, device);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

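/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * usual get/use/put pattern around the reference-counting helpers above:
 *
 *	struct iommu_group *grp = iommu_group_get(dev);
 *
 *	if (grp) {
 *		pr_debug("dev is in iommu group %d\n", iommu_group_id(grp));
 *		iommu_group_put(grp);
 *	}
 */
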
/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

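/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * driver that wants recoverable-fault reports might register a handler
 * like this, later pairing IOMMU_FAULT_PAGE_REQ reports with
 * iommu_page_response(). "my_fault_handler" and "my_data" are placeholders.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// queue the fault; respond later via iommu_page_response()
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler,
 *						   my_data);
 */
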
/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static struct iommu_domain *
__iommu_group_alloc_default_domain(const struct bus_type *bus,
				   struct iommu_group *group, int req_type)
{
	if (group->default_domain && group->default_domain->type == req_type)
		return group->default_domain;
	return __iommu_domain_alloc(bus, req_type);
}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	const struct bus_type *bus =
		list_first_entry(&group->devices, struct group_device, list)
			->dev->bus;
	struct iommu_domain *dom;

	lockdep_assert_held(&group->mutex);

	if (req_type)
		return __iommu_group_alloc_default_domain(bus, group, req_type);

	/* The driver gave no guidance on what type to use, try the default */
	dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
	if (dom)
		return dom;

	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
		return NULL;
	dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
	if (!dom)
		return NULL;

	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
		iommu_def_domain_type, group->name);
	return dom;
}

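/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * grouping helpers above (generic_device_group(), pci_device_group(),
 * fsl_mc_device_group()) are normally plugged into a driver's iommu_ops
 * rather than called directly:
 *
 *	static const struct iommu_ops my_iommu_ops = {	// hypothetical
 *		...
 *		.device_group	= pci_device_group,	// or generic_device_group
 *		...
 *	};
 */
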
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	int ret;

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

/* A target_type of 0 will select the best domain type and cannot fail */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{
	int best_type = target_type;
	struct group_device *gdev;
	struct device *last_dev;

	lockdep_assert_held(&group->mutex);

	for_each_group_device(group, gdev) {
		unsigned int type = iommu_get_def_domain_type(gdev->dev);

		if (best_type && type && best_type != type) {
			if (target_type) {
				dev_err_ratelimited(
					gdev->dev,
					"Device cannot be in %s domain\n",
					iommu_domain_type_str(target_type));
				return -1;
			}

			dev_warn(
				gdev->dev,
				"Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				iommu_domain_type_str(type), dev_name(last_dev),
				iommu_domain_type_str(best_type));
			return 0;
		}
		if (!best_type)
			best_type = type;
		last_dev = gdev->dev;
	}
	return best_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
		 */
		for_each_group_device(group, gdev)
			iommu_group_do_probe_finalize(gdev->dev);
	}

	return 0;
}

bool iommu_present(const struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);

/**
 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
 *	 for a group
 * @group: Group to query
 *
 * IOMMU groups should not have differing values of
 * msi_device_has_isolated_msi() for devices in a group. However nothing
 * directly prevents this, so ensure mistakes don't result in isolation failures
 * by checking that all the devices are the same.
 */
bool iommu_group_has_isolated_msi(struct iommu_group *group)
{
	struct group_device *group_dev;
	bool ret = true;

	mutex_lock(&group->mutex);
	for_each_group_device(group, group_dev)
		ret &= msi_device_has_isolated_msi(group_dev->dev);
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);

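/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical capability check with device_iommu_capable() before relying on
 * IOMMU-enforced behaviour; the error policy shown is the caller's own.
 *
 *	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		return -EINVAL;
 */
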
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;
	unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(alloc_type);
	if (!domain)
		return NULL;

	domain->type = type;
	/*
	 * If not already set, assume all sizes by default; the driver
	 * may override this later
	 */
	if (!domain->pgsize_bitmap)
		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	iommu_put_dma_cookie(domain);
	domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
	struct iommu_domain *new_domain;

	if (group->owner)
		new_domain = group->blocking_domain;
	else
		new_domain = group->default_domain;

	__iommu_group_set_domain_nofail(group, new_domain);
}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (ret)
		return ret;
	dev->iommu->attach_deferred = 0;
	trace_attach_device_to_domain(dev);
	return 0;
}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
1998 */ 1999 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2000 { 2001 struct iommu_group *group; 2002 int ret; 2003 2004 group = iommu_group_get(dev); 2005 if (!group) 2006 return -ENODEV; 2007 2008 /* 2009 * Lock the group to make sure the device-count doesn't 2010 * change while we are attaching 2011 */ 2012 mutex_lock(&group->mutex); 2013 ret = -EINVAL; 2014 if (list_count_nodes(&group->devices) != 1) 2015 goto out_unlock; 2016 2017 ret = __iommu_attach_group(domain, group); 2018 2019 out_unlock: 2020 mutex_unlock(&group->mutex); 2021 iommu_group_put(group); 2022 2023 return ret; 2024 } 2025 EXPORT_SYMBOL_GPL(iommu_attach_device); 2026 2027 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2028 { 2029 if (dev->iommu && dev->iommu->attach_deferred) 2030 return __iommu_attach_device(domain, dev); 2031 2032 return 0; 2033 } 2034 2035 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2036 { 2037 struct iommu_group *group; 2038 2039 group = iommu_group_get(dev); 2040 if (!group) 2041 return; 2042 2043 mutex_lock(&group->mutex); 2044 if (WARN_ON(domain != group->domain) || 2045 WARN_ON(list_count_nodes(&group->devices) != 1)) 2046 goto out_unlock; 2047 __iommu_group_set_core_domain(group); 2048 2049 out_unlock: 2050 mutex_unlock(&group->mutex); 2051 iommu_group_put(group); 2052 } 2053 EXPORT_SYMBOL_GPL(iommu_detach_device); 2054 2055 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2056 { 2057 struct iommu_domain *domain; 2058 struct iommu_group *group; 2059 2060 group = iommu_group_get(dev); 2061 if (!group) 2062 return NULL; 2063 2064 domain = group->domain; 2065 2066 iommu_group_put(group); 2067 2068 return domain; 2069 } 2070 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2071 2072 /* 2073 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2074 * guarantees that the group and its default domain are valid and correct. 2075 */ 2076 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2077 { 2078 return dev->iommu_group->default_domain; 2079 } 2080 2081 static int __iommu_attach_group(struct iommu_domain *domain, 2082 struct iommu_group *group) 2083 { 2084 if (group->domain && group->domain != group->default_domain && 2085 group->domain != group->blocking_domain) 2086 return -EBUSY; 2087 2088 return __iommu_group_set_domain(group, domain); 2089 } 2090 2091 /** 2092 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2093 * @domain: IOMMU domain to attach 2094 * @group: IOMMU group that will be attached 2095 * 2096 * Returns 0 on success and error code on failure 2097 * 2098 * Note that EINVAL can be treated as a soft failure, indicating 2099 * that certain configuration of the domain is incompatible with 2100 * the group. In this case attaching a different domain to the 2101 * group may succeed. 
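 *
 * Illustrative sketch of the group-based flow (error handling trimmed):
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		ret = iommu_attach_group(domain, group);
 *		...
 *		iommu_detach_group(domain, group);
 *		iommu_group_put(group);
 *	}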
2102 */ 2103 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2104 { 2105 int ret; 2106 2107 mutex_lock(&group->mutex); 2108 ret = __iommu_attach_group(domain, group); 2109 mutex_unlock(&group->mutex); 2110 2111 return ret; 2112 } 2113 EXPORT_SYMBOL_GPL(iommu_attach_group); 2114 2115 static int __iommu_device_set_domain(struct iommu_group *group, 2116 struct device *dev, 2117 struct iommu_domain *new_domain, 2118 unsigned int flags) 2119 { 2120 int ret; 2121 2122 if (dev->iommu->attach_deferred) { 2123 if (new_domain == group->default_domain) 2124 return 0; 2125 dev->iommu->attach_deferred = 0; 2126 } 2127 2128 ret = __iommu_attach_device(new_domain, dev); 2129 if (ret) { 2130 /* 2131 * If we have a blocking domain then try to attach that in hopes 2132 * of avoiding a UAF. Modern drivers should implement blocking 2133 * domains as global statics that cannot fail. 2134 */ 2135 if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) && 2136 group->blocking_domain && 2137 group->blocking_domain != new_domain) 2138 __iommu_attach_device(group->blocking_domain, dev); 2139 return ret; 2140 } 2141 return 0; 2142 } 2143 2144 /* 2145 * If 0 is returned the group's domain is new_domain. If an error is returned 2146 * then the group's domain will be set back to the existing domain unless 2147 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's 2148 * domain is left inconsistent. This is a driver bug to fail attach with a 2149 * previously good domain. We try to avoid a kernel UAF because of this. 2150 * 2151 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU 2152 * API works on domains and devices. Bridge that gap by iterating over the 2153 * devices in a group. Ideally we'd have a single device which represents the 2154 * requestor ID of the group, but we also allow IOMMU drivers to create policy 2155 * defined minimum sets, where the physical hardware may be able to distinguish 2156 * members, but we wish to group them at a higher level (ex. untrusted 2157 * multi-function PCI devices). Thus we attach each device. 2158 */ 2159 static int __iommu_group_set_domain_internal(struct iommu_group *group, 2160 struct iommu_domain *new_domain, 2161 unsigned int flags) 2162 { 2163 struct group_device *last_gdev; 2164 struct group_device *gdev; 2165 int result; 2166 int ret; 2167 2168 lockdep_assert_held(&group->mutex); 2169 2170 if (group->domain == new_domain) 2171 return 0; 2172 2173 /* 2174 * New drivers should support default domains, so set_platform_dma() 2175 * op will never be called. Otherwise the NULL domain represents some 2176 * platform specific behavior. 2177 */ 2178 if (!new_domain) { 2179 for_each_group_device(group, gdev) { 2180 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev); 2181 2182 if (!WARN_ON(!ops->set_platform_dma_ops)) 2183 ops->set_platform_dma_ops(gdev->dev); 2184 } 2185 group->domain = NULL; 2186 return 0; 2187 } 2188 2189 /* 2190 * Changing the domain is done by calling attach_dev() on the new 2191 * domain. This switch does not have to be atomic and DMA can be 2192 * discarded during the transition. DMA must only be able to access 2193 * either new_domain or group->domain, never something else. 2194 */ 2195 result = 0; 2196 for_each_group_device(group, gdev) { 2197 ret = __iommu_device_set_domain(group, gdev->dev, new_domain, 2198 flags); 2199 if (ret) { 2200 result = ret; 2201 /* 2202 * Keep trying the other devices in the group.
If a 2203 * driver fails attach to an otherwise good domain, and 2204 * does not support blocking domains, it should at least 2205 * drop its reference on the current domain so we don't 2206 * UAF. 2207 */ 2208 if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) 2209 continue; 2210 goto err_revert; 2211 } 2212 } 2213 group->domain = new_domain; 2214 return result; 2215 2216 err_revert: 2217 /* 2218 * This is called in error unwind paths. A well behaved driver should 2219 * always allow us to attach to a domain that was already attached. 2220 */ 2221 last_gdev = gdev; 2222 for_each_group_device(group, gdev) { 2223 const struct iommu_ops *ops = dev_iommu_ops(gdev->dev); 2224 2225 /* 2226 * If set_platform_dma_ops is not present a NULL domain can 2227 * happen only for first probe, in which case we leave 2228 * group->domain as NULL and let release clean everything up. 2229 */ 2230 if (group->domain) 2231 WARN_ON(__iommu_device_set_domain( 2232 group, gdev->dev, group->domain, 2233 IOMMU_SET_DOMAIN_MUST_SUCCEED)); 2234 else if (ops->set_platform_dma_ops) 2235 ops->set_platform_dma_ops(gdev->dev); 2236 if (gdev == last_gdev) 2237 break; 2238 } 2239 return ret; 2240 } 2241 2242 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2243 { 2244 mutex_lock(&group->mutex); 2245 __iommu_group_set_core_domain(group); 2246 mutex_unlock(&group->mutex); 2247 } 2248 EXPORT_SYMBOL_GPL(iommu_detach_group); 2249 2250 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2251 { 2252 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2253 return iova; 2254 2255 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2256 return 0; 2257 2258 return domain->ops->iova_to_phys(domain, iova); 2259 } 2260 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2261 2262 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2263 phys_addr_t paddr, size_t size, size_t *count) 2264 { 2265 unsigned int pgsize_idx, pgsize_idx_next; 2266 unsigned long pgsizes; 2267 size_t offset, pgsize, pgsize_next; 2268 unsigned long addr_merge = paddr | iova; 2269 2270 /* Page sizes supported by the hardware and small enough for @size */ 2271 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2272 2273 /* Constrain the page sizes further based on the maximum alignment */ 2274 if (likely(addr_merge)) 2275 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2276 2277 /* Make sure we have at least one suitable page size */ 2278 BUG_ON(!pgsizes); 2279 2280 /* Pick the biggest page size remaining */ 2281 pgsize_idx = __fls(pgsizes); 2282 pgsize = BIT(pgsize_idx); 2283 if (!count) 2284 return pgsize; 2285 2286 /* Find the next biggest support page size, if it exists */ 2287 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2288 if (!pgsizes) 2289 goto out_set_count; 2290 2291 pgsize_idx_next = __ffs(pgsizes); 2292 pgsize_next = BIT(pgsize_idx_next); 2293 2294 /* 2295 * There's no point trying a bigger page size unless the virtual 2296 * and physical addresses are similarly offset within the larger page. 2297 */ 2298 if ((iova ^ paddr) & (pgsize_next - 1)) 2299 goto out_set_count; 2300 2301 /* Calculate the offset to the next page size alignment boundary */ 2302 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2303 2304 /* 2305 * If size is big enough to accommodate the larger page, reduce 2306 * the number of smaller pages. 
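 *
 * Worked example, assuming a hypothetical pgsize_bitmap of 4K | 2M: with
 * iova == paddr == 0x1ff000 and size == 0x202000, only the 4K size
 * survives the alignment mask, the next bigger size is 2M, and the
 * distance to the next 2M boundary is offset == 0x1000. Since
 * 0x1000 + 2M <= 0x202000 the size is clipped to 0x1000, so a single 4K
 * page is mapped here and the next iteration can use a 2M mapping.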
2307 */ 2308 if (offset + pgsize_next <= size) 2309 size = offset; 2310 2311 out_set_count: 2312 *count = size >> pgsize_idx; 2313 return pgsize; 2314 } 2315 2316 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2317 phys_addr_t paddr, size_t size, int prot, 2318 gfp_t gfp, size_t *mapped) 2319 { 2320 const struct iommu_domain_ops *ops = domain->ops; 2321 size_t pgsize, count; 2322 int ret; 2323 2324 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2325 2326 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2327 iova, &paddr, pgsize, count); 2328 2329 if (ops->map_pages) { 2330 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2331 gfp, mapped); 2332 } else { 2333 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2334 *mapped = ret ? 0 : pgsize; 2335 } 2336 2337 return ret; 2338 } 2339 2340 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2341 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2342 { 2343 const struct iommu_domain_ops *ops = domain->ops; 2344 unsigned long orig_iova = iova; 2345 unsigned int min_pagesz; 2346 size_t orig_size = size; 2347 phys_addr_t orig_paddr = paddr; 2348 int ret = 0; 2349 2350 if (unlikely(!(ops->map || ops->map_pages) || 2351 domain->pgsize_bitmap == 0UL)) 2352 return -ENODEV; 2353 2354 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2355 return -EINVAL; 2356 2357 /* find out the minimum page size supported */ 2358 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2359 2360 /* 2361 * both the virtual address and the physical one, as well as 2362 * the size of the mapping, must be aligned (at least) to the 2363 * size of the smallest page supported by the hardware 2364 */ 2365 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2366 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2367 iova, &paddr, size, min_pagesz); 2368 return -EINVAL; 2369 } 2370 2371 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2372 2373 while (size) { 2374 size_t mapped = 0; 2375 2376 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2377 &mapped); 2378 /* 2379 * Some pages may have been mapped, even if an error occurred, 2380 * so we should account for those so they can be unmapped. 
2381 */ 2382 size -= mapped; 2383 2384 if (ret) 2385 break; 2386 2387 iova += mapped; 2388 paddr += mapped; 2389 } 2390 2391 /* unroll mapping in case something went wrong */ 2392 if (ret) 2393 iommu_unmap(domain, orig_iova, orig_size - size); 2394 else 2395 trace_map(orig_iova, orig_paddr, orig_size); 2396 2397 return ret; 2398 } 2399 2400 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2401 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2402 { 2403 const struct iommu_domain_ops *ops = domain->ops; 2404 int ret; 2405 2406 might_sleep_if(gfpflags_allow_blocking(gfp)); 2407 2408 /* Discourage passing strange GFP flags */ 2409 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2410 __GFP_HIGHMEM))) 2411 return -EINVAL; 2412 2413 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2414 if (ret == 0 && ops->iotlb_sync_map) 2415 ops->iotlb_sync_map(domain, iova, size); 2416 2417 return ret; 2418 } 2419 EXPORT_SYMBOL_GPL(iommu_map); 2420 2421 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2422 unsigned long iova, size_t size, 2423 struct iommu_iotlb_gather *iotlb_gather) 2424 { 2425 const struct iommu_domain_ops *ops = domain->ops; 2426 size_t pgsize, count; 2427 2428 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2429 return ops->unmap_pages ? 2430 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2431 ops->unmap(domain, iova, pgsize, iotlb_gather); 2432 } 2433 2434 static size_t __iommu_unmap(struct iommu_domain *domain, 2435 unsigned long iova, size_t size, 2436 struct iommu_iotlb_gather *iotlb_gather) 2437 { 2438 const struct iommu_domain_ops *ops = domain->ops; 2439 size_t unmapped_page, unmapped = 0; 2440 unsigned long orig_iova = iova; 2441 unsigned int min_pagesz; 2442 2443 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2444 domain->pgsize_bitmap == 0UL)) 2445 return 0; 2446 2447 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2448 return 0; 2449 2450 /* find out the minimum page size supported */ 2451 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2452 2453 /* 2454 * The virtual address, as well as the size of the mapping, must be 2455 * aligned (at least) to the size of the smallest page supported 2456 * by the hardware 2457 */ 2458 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2459 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2460 iova, size, min_pagesz); 2461 return 0; 2462 } 2463 2464 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2465 2466 /* 2467 * Keep iterating until we either unmap 'size' bytes (or more) 2468 * or we hit an area that isn't mapped. 
2469 */ 2470 while (unmapped < size) { 2471 unmapped_page = __iommu_unmap_pages(domain, iova, 2472 size - unmapped, 2473 iotlb_gather); 2474 if (!unmapped_page) 2475 break; 2476 2477 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2478 iova, unmapped_page); 2479 2480 iova += unmapped_page; 2481 unmapped += unmapped_page; 2482 } 2483 2484 trace_unmap(orig_iova, size, unmapped); 2485 return unmapped; 2486 } 2487 2488 size_t iommu_unmap(struct iommu_domain *domain, 2489 unsigned long iova, size_t size) 2490 { 2491 struct iommu_iotlb_gather iotlb_gather; 2492 size_t ret; 2493 2494 iommu_iotlb_gather_init(&iotlb_gather); 2495 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2496 iommu_iotlb_sync(domain, &iotlb_gather); 2497 2498 return ret; 2499 } 2500 EXPORT_SYMBOL_GPL(iommu_unmap); 2501 2502 size_t iommu_unmap_fast(struct iommu_domain *domain, 2503 unsigned long iova, size_t size, 2504 struct iommu_iotlb_gather *iotlb_gather) 2505 { 2506 return __iommu_unmap(domain, iova, size, iotlb_gather); 2507 } 2508 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2509 2510 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2511 struct scatterlist *sg, unsigned int nents, int prot, 2512 gfp_t gfp) 2513 { 2514 const struct iommu_domain_ops *ops = domain->ops; 2515 size_t len = 0, mapped = 0; 2516 phys_addr_t start; 2517 unsigned int i = 0; 2518 int ret; 2519 2520 might_sleep_if(gfpflags_allow_blocking(gfp)); 2521 2522 /* Discourage passing strange GFP flags */ 2523 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2524 __GFP_HIGHMEM))) 2525 return -EINVAL; 2526 2527 while (i <= nents) { 2528 phys_addr_t s_phys = sg_phys(sg); 2529 2530 if (len && s_phys != start + len) { 2531 ret = __iommu_map(domain, iova + mapped, start, 2532 len, prot, gfp); 2533 2534 if (ret) 2535 goto out_err; 2536 2537 mapped += len; 2538 len = 0; 2539 } 2540 2541 if (sg_dma_is_bus_address(sg)) 2542 goto next; 2543 2544 if (len) { 2545 len += sg->length; 2546 } else { 2547 len = sg->length; 2548 start = s_phys; 2549 } 2550 2551 next: 2552 if (++i < nents) 2553 sg = sg_next(sg); 2554 } 2555 2556 if (ops->iotlb_sync_map) 2557 ops->iotlb_sync_map(domain, iova, mapped); 2558 return mapped; 2559 2560 out_err: 2561 /* undo mappings already done */ 2562 iommu_unmap(domain, iova, mapped); 2563 2564 return ret; 2565 } 2566 EXPORT_SYMBOL_GPL(iommu_map_sg); 2567 2568 /** 2569 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2570 * @domain: the iommu domain where the fault has happened 2571 * @dev: the device where the fault has happened 2572 * @iova: the faulting address 2573 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2574 * 2575 * This function should be called by the low-level IOMMU implementations 2576 * whenever IOMMU faults happen, to allow high-level users, that are 2577 * interested in such events, to know about them. 2578 * 2579 * This event may be useful for several possible use cases: 2580 * - mere logging of the event 2581 * - dynamic TLB/PTE loading 2582 * - if restarting of the faulting device is required 2583 * 2584 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2585 * PTE/TLB loading will one day be supported, implementations will be able 2586 * to tell whether it succeeded or not according to this return value). 
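 *
 * Illustrative sketch of a driver reporting a fault from its IRQ handler
 * (the register name and is_write flag below are hypothetical):
 *
 *	iova = readq(base + FAULT_ADDR_REG);
 *	flags = is_write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
 *	if (report_iommu_fault(domain, dev, iova, flags))
 *		dev_err_ratelimited(dev, "unhandled fault at 0x%lx\n", iova);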
2587 * 2588 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2589 * (though fault handlers can also return -ENOSYS, in case they want to 2590 * elicit the default behavior of the IOMMU drivers). 2591 */ 2592 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2593 unsigned long iova, int flags) 2594 { 2595 int ret = -ENOSYS; 2596 2597 /* 2598 * if upper layers showed interest and installed a fault handler, 2599 * invoke it. 2600 */ 2601 if (domain->handler) 2602 ret = domain->handler(domain, dev, iova, flags, 2603 domain->handler_token); 2604 2605 trace_io_page_fault(dev, iova, flags); 2606 return ret; 2607 } 2608 EXPORT_SYMBOL_GPL(report_iommu_fault); 2609 2610 static int __init iommu_init(void) 2611 { 2612 iommu_group_kset = kset_create_and_add("iommu_groups", 2613 NULL, kernel_kobj); 2614 BUG_ON(!iommu_group_kset); 2615 2616 iommu_debugfs_setup(); 2617 2618 return 0; 2619 } 2620 core_initcall(iommu_init); 2621 2622 int iommu_enable_nesting(struct iommu_domain *domain) 2623 { 2624 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2625 return -EINVAL; 2626 if (!domain->ops->enable_nesting) 2627 return -EINVAL; 2628 return domain->ops->enable_nesting(domain); 2629 } 2630 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2631 2632 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2633 unsigned long quirk) 2634 { 2635 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2636 return -EINVAL; 2637 if (!domain->ops->set_pgtable_quirks) 2638 return -EINVAL; 2639 return domain->ops->set_pgtable_quirks(domain, quirk); 2640 } 2641 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2642 2643 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2644 { 2645 const struct iommu_ops *ops = dev_iommu_ops(dev); 2646 2647 if (ops->get_resv_regions) 2648 ops->get_resv_regions(dev, list); 2649 } 2650 2651 /** 2652 * iommu_put_resv_regions - release reserved regions 2653 * @dev: device for which to free reserved regions 2654 * @list: reserved region list for device 2655 * 2656 * This releases a reserved region list acquired by iommu_get_resv_regions().
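 *
 * Typical pairing with iommu_get_resv_regions(), sketched:
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list) {
 *		/* e.g. reserve [start, start + length) in the IOVA space */
 *	}
 *	iommu_put_resv_regions(dev, &resv_regions);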
2657 */ 2658 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2659 { 2660 struct iommu_resv_region *entry, *next; 2661 2662 list_for_each_entry_safe(entry, next, list, list) { 2663 if (entry->free) 2664 entry->free(dev, entry); 2665 else 2666 kfree(entry); 2667 } 2668 } 2669 EXPORT_SYMBOL(iommu_put_resv_regions); 2670 2671 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2672 size_t length, int prot, 2673 enum iommu_resv_type type, 2674 gfp_t gfp) 2675 { 2676 struct iommu_resv_region *region; 2677 2678 region = kzalloc(sizeof(*region), gfp); 2679 if (!region) 2680 return NULL; 2681 2682 INIT_LIST_HEAD(&region->list); 2683 region->start = start; 2684 region->length = length; 2685 region->prot = prot; 2686 region->type = type; 2687 return region; 2688 } 2689 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2690 2691 void iommu_set_default_passthrough(bool cmd_line) 2692 { 2693 if (cmd_line) 2694 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2695 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2696 } 2697 2698 void iommu_set_default_translated(bool cmd_line) 2699 { 2700 if (cmd_line) 2701 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2702 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2703 } 2704 2705 bool iommu_default_passthrough(void) 2706 { 2707 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2708 } 2709 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2710 2711 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2712 { 2713 const struct iommu_ops *ops = NULL; 2714 struct iommu_device *iommu; 2715 2716 spin_lock(&iommu_device_lock); 2717 list_for_each_entry(iommu, &iommu_device_list, list) 2718 if (iommu->fwnode == fwnode) { 2719 ops = iommu->ops; 2720 break; 2721 } 2722 spin_unlock(&iommu_device_lock); 2723 return ops; 2724 } 2725 2726 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2727 const struct iommu_ops *ops) 2728 { 2729 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2730 2731 if (fwspec) 2732 return ops == fwspec->ops ? 0 : -EINVAL; 2733 2734 if (!dev_iommu_get(dev)) 2735 return -ENOMEM; 2736 2737 /* Preallocate for the overwhelmingly common case of 1 ID */ 2738 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2739 if (!fwspec) 2740 return -ENOMEM; 2741 2742 of_node_get(to_of_node(iommu_fwnode)); 2743 fwspec->iommu_fwnode = iommu_fwnode; 2744 fwspec->ops = ops; 2745 dev_iommu_fwspec_set(dev, fwspec); 2746 return 0; 2747 } 2748 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2749 2750 void iommu_fwspec_free(struct device *dev) 2751 { 2752 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2753 2754 if (fwspec) { 2755 fwnode_handle_put(fwspec->iommu_fwnode); 2756 kfree(fwspec); 2757 dev_iommu_fwspec_set(dev, NULL); 2758 } 2759 } 2760 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2761 2762 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2763 { 2764 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2765 int i, new_num; 2766 2767 if (!fwspec) 2768 return -EINVAL; 2769 2770 new_num = fwspec->num_ids + num_ids; 2771 if (new_num > 1) { 2772 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2773 GFP_KERNEL); 2774 if (!fwspec) 2775 return -ENOMEM; 2776 2777 dev_iommu_fwspec_set(dev, fwspec); 2778 } 2779 2780 for (i = 0; i < num_ids; i++) 2781 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2782 2783 fwspec->num_ids = new_num; 2784 return 0; 2785 } 2786 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2787 2788 /* 2789 * Per device IOMMU features.
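 *
 * Hedged usage sketch: a driver wanting Shared Virtual Addressing would
 * typically bracket its use of the feature like this (feature choice and
 * error handling are illustrative only):
 *
 *	if (!iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		... use SVA ...
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	}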
2790 */ 2791 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2792 { 2793 if (dev->iommu && dev->iommu->iommu_dev) { 2794 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2795 2796 if (ops->dev_enable_feat) 2797 return ops->dev_enable_feat(dev, feat); 2798 } 2799 2800 return -ENODEV; 2801 } 2802 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2803 2804 /* 2805 * The device drivers should do the necessary cleanups before calling this. 2806 */ 2807 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2808 { 2809 if (dev->iommu && dev->iommu->iommu_dev) { 2810 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2811 2812 if (ops->dev_disable_feat) 2813 return ops->dev_disable_feat(dev, feat); 2814 } 2815 2816 return -EBUSY; 2817 } 2818 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2819 2820 /** 2821 * iommu_setup_default_domain - Set the default_domain for the group 2822 * @group: Group to change 2823 * @target_type: Domain type to set as the default_domain 2824 * 2825 * Allocate a default domain and set it as the current domain on the group. If 2826 * the group already has a default domain it will be changed to the target_type. 2827 * When target_type is 0 the default domain is selected based on driver and 2828 * system preferences. 2829 */ 2830 static int iommu_setup_default_domain(struct iommu_group *group, 2831 int target_type) 2832 { 2833 struct iommu_domain *old_dom = group->default_domain; 2834 struct group_device *gdev; 2835 struct iommu_domain *dom; 2836 bool direct_failed; 2837 int req_type; 2838 int ret; 2839 2840 lockdep_assert_held(&group->mutex); 2841 2842 req_type = iommu_get_default_domain_type(group, target_type); 2843 if (req_type < 0) 2844 return -EINVAL; 2845 2846 /* 2847 * There are still some drivers which don't support default domains, so 2848 * we ignore the failure and leave group->default_domain NULL. 2849 * 2850 * We assume that the iommu driver starts up the device in 2851 * 'set_platform_dma_ops' mode if it does not support default domains. 2852 */ 2853 dom = iommu_group_alloc_default_domain(group, req_type); 2854 if (!dom) { 2855 /* Once in default_domain mode we never leave */ 2856 if (group->default_domain) 2857 return -ENODEV; 2858 group->default_domain = NULL; 2859 return 0; 2860 } 2861 2862 if (group->default_domain == dom) 2863 return 0; 2864 2865 /* 2866 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be 2867 * mapped before their device is attached, in order to guarantee 2868 * continuity with any FW activity 2869 */ 2870 direct_failed = false; 2871 for_each_group_device(group, gdev) { 2872 if (iommu_create_device_direct_mappings(dom, gdev->dev)) { 2873 direct_failed = true; 2874 dev_warn_once( 2875 gdev->dev->iommu->iommu_dev->dev, 2876 "IOMMU driver was not able to establish FW requested direct mapping."); 2877 } 2878 } 2879 2880 /* We must set default_domain early for __iommu_device_set_domain */ 2881 group->default_domain = dom; 2882 if (!group->domain) { 2883 /* 2884 * Drivers are not allowed to fail the first domain attach. 2885 * The only way to recover from this is to fail attaching the 2886 * iommu driver and call ops->release_device. Put the domain 2887 * in group->default_domain so it is freed after. 
2888 */ 2889 ret = __iommu_group_set_domain_internal( 2890 group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 2891 if (WARN_ON(ret)) 2892 goto out_free; 2893 } else { 2894 ret = __iommu_group_set_domain(group, dom); 2895 if (ret) { 2896 iommu_domain_free(dom); 2897 group->default_domain = old_dom; 2898 return ret; 2899 } 2900 } 2901 2902 /* 2903 * Drivers are supposed to allow mappings to be installed in a domain 2904 * before device attachment, but some don't. Hack around this defect by 2905 * trying again after attaching. If this happens it means the device 2906 * will not continuously have the IOMMU_RESV_DIRECT map. 2907 */ 2908 if (direct_failed) { 2909 for_each_group_device(group, gdev) { 2910 ret = iommu_create_device_direct_mappings(dom, gdev->dev); 2911 if (ret) 2912 goto err_restore; 2913 } 2914 } 2915 return 0; 2916 err_restore: 2917 if (old_dom) { 2918 __iommu_group_set_domain_internal( 2919 group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED); 2920 iommu_domain_free(dom); 2921 old_dom = NULL; 2922 } 2923 out_free: 2924 if (old_dom) 2925 iommu_domain_free(old_dom); 2926 return ret; 2927 } 2928 2929 /* 2930 * Changing the default domain through sysfs requires the users to unbind the 2931 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 2932 * transition. Return failure if this isn't met. 2933 * 2934 * We need to consider the race between this and the device release path. 2935 * group->mutex is used here to guarantee that the device release path 2936 * will not be entered at the same time. 2937 */ 2938 static ssize_t iommu_group_store_type(struct iommu_group *group, 2939 const char *buf, size_t count) 2940 { 2941 struct group_device *gdev; 2942 int ret, req_type; 2943 2944 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2945 return -EACCES; 2946 2947 if (WARN_ON(!group) || !group->default_domain) 2948 return -EINVAL; 2949 2950 if (sysfs_streq(buf, "identity")) 2951 req_type = IOMMU_DOMAIN_IDENTITY; 2952 else if (sysfs_streq(buf, "DMA")) 2953 req_type = IOMMU_DOMAIN_DMA; 2954 else if (sysfs_streq(buf, "DMA-FQ")) 2955 req_type = IOMMU_DOMAIN_DMA_FQ; 2956 else if (sysfs_streq(buf, "auto")) 2957 req_type = 0; 2958 else 2959 return -EINVAL; 2960 2961 mutex_lock(&group->mutex); 2962 /* We can bring up a flush queue without tearing down the domain. */ 2963 if (req_type == IOMMU_DOMAIN_DMA_FQ && 2964 group->default_domain->type == IOMMU_DOMAIN_DMA) { 2965 ret = iommu_dma_init_fq(group->default_domain); 2966 if (ret) 2967 goto out_unlock; 2968 2969 group->default_domain->type = IOMMU_DOMAIN_DMA_FQ; 2970 ret = count; 2971 goto out_unlock; 2972 } 2973 2974 /* Otherwise, ensure that device exists and no driver is bound. */ 2975 if (list_empty(&group->devices) || group->owner_cnt) { 2976 ret = -EPERM; 2977 goto out_unlock; 2978 } 2979 2980 ret = iommu_setup_default_domain(group, req_type); 2981 if (ret) 2982 goto out_unlock; 2983 2984 /* 2985 * Release the mutex here because ops->probe_finalize() call-back of 2986 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 2987 * in-turn might call back into IOMMU core code, where it tries to take 2988 * group->mutex, resulting in a deadlock.
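 *
 * (For reference, this store routine backs /sys/kernel/iommu_groups/<id>/type;
 * e.g. writing "DMA-FQ" to a group whose default domain is DMA only brings up
 * a flush queue, while any other change requires the devices to be unbound.)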
2989 */ 2990 mutex_unlock(&group->mutex); 2991 2992 /* Make sure dma_ops is appropriately set */ 2993 for_each_group_device(group, gdev) 2994 iommu_group_do_probe_finalize(gdev->dev); 2995 return count; 2996 2997 out_unlock: 2998 mutex_unlock(&group->mutex); 2999 return ret ?: count; 3000 } 3001 3002 static bool iommu_is_default_domain(struct iommu_group *group) 3003 { 3004 if (group->domain == group->default_domain) 3005 return true; 3006 3007 /* 3008 * If the default domain was set to identity and it is still an identity 3009 * domain then we consider this a pass. This happens because of 3010 * amd_iommu_init_device() replacing the default identity domain with an 3011 * identity domain that has a different configuration for AMDGPU. 3012 */ 3013 if (group->default_domain && 3014 group->default_domain->type == IOMMU_DOMAIN_IDENTITY && 3015 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) 3016 return true; 3017 return false; 3018 } 3019 3020 /** 3021 * iommu_device_use_default_domain() - Device driver wants to handle device 3022 * DMA through the kernel DMA API. 3023 * @dev: The device. 3024 * 3025 * The device driver about to bind @dev wants to do DMA through the kernel 3026 * DMA API. Return 0 if it is allowed, otherwise an error. 3027 */ 3028 int iommu_device_use_default_domain(struct device *dev) 3029 { 3030 struct iommu_group *group = iommu_group_get(dev); 3031 int ret = 0; 3032 3033 if (!group) 3034 return 0; 3035 3036 mutex_lock(&group->mutex); 3037 if (group->owner_cnt) { 3038 if (group->owner || !iommu_is_default_domain(group) || 3039 !xa_empty(&group->pasid_array)) { 3040 ret = -EBUSY; 3041 goto unlock_out; 3042 } 3043 } 3044 3045 group->owner_cnt++; 3046 3047 unlock_out: 3048 mutex_unlock(&group->mutex); 3049 iommu_group_put(group); 3050 3051 return ret; 3052 } 3053 3054 /** 3055 * iommu_device_unuse_default_domain() - Device driver stops handling device 3056 * DMA through the kernel DMA API. 3057 * @dev: The device. 3058 * 3059 * The device driver doesn't want to do DMA through kernel DMA API anymore. 3060 * It must be called after iommu_device_use_default_domain(). 3061 */ 3062 void iommu_device_unuse_default_domain(struct device *dev) 3063 { 3064 struct iommu_group *group = iommu_group_get(dev); 3065 3066 if (!group) 3067 return; 3068 3069 mutex_lock(&group->mutex); 3070 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3071 group->owner_cnt--; 3072 3073 mutex_unlock(&group->mutex); 3074 iommu_group_put(group); 3075 } 3076 3077 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3078 { 3079 struct group_device *dev = 3080 list_first_entry(&group->devices, struct group_device, list); 3081 3082 if (group->blocking_domain) 3083 return 0; 3084 3085 group->blocking_domain = 3086 __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED); 3087 if (!group->blocking_domain) { 3088 /* 3089 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED 3090 * create an empty domain instead.
3091 */ 3092 group->blocking_domain = __iommu_domain_alloc( 3093 dev->dev->bus, IOMMU_DOMAIN_UNMANAGED); 3094 if (!group->blocking_domain) 3095 return -EINVAL; 3096 } 3097 return 0; 3098 } 3099 3100 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3101 { 3102 int ret; 3103 3104 if ((group->domain && group->domain != group->default_domain) || 3105 !xa_empty(&group->pasid_array)) 3106 return -EBUSY; 3107 3108 ret = __iommu_group_alloc_blocking_domain(group); 3109 if (ret) 3110 return ret; 3111 ret = __iommu_group_set_domain(group, group->blocking_domain); 3112 if (ret) 3113 return ret; 3114 3115 group->owner = owner; 3116 group->owner_cnt++; 3117 return 0; 3118 } 3119 3120 /** 3121 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3122 * @group: The group. 3123 * @owner: Caller specified pointer. Used for exclusive ownership. 3124 * 3125 * This is to support backward compatibility for vfio which manages the dma 3126 * ownership in iommu_group level. New invocations on this interface should be 3127 * prohibited. Only a single owner may exist for a group. 3128 */ 3129 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3130 { 3131 int ret = 0; 3132 3133 if (WARN_ON(!owner)) 3134 return -EINVAL; 3135 3136 mutex_lock(&group->mutex); 3137 if (group->owner_cnt) { 3138 ret = -EPERM; 3139 goto unlock_out; 3140 } 3141 3142 ret = __iommu_take_dma_ownership(group, owner); 3143 unlock_out: 3144 mutex_unlock(&group->mutex); 3145 3146 return ret; 3147 } 3148 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3149 3150 /** 3151 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3152 * @dev: The device. 3153 * @owner: Caller specified pointer. Used for exclusive ownership. 3154 * 3155 * Claim the DMA ownership of a device. Multiple devices in the same group may 3156 * concurrently claim ownership if they present the same owner value. Returns 0 3157 * on success and error code on failure 3158 */ 3159 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3160 { 3161 struct iommu_group *group; 3162 int ret = 0; 3163 3164 if (WARN_ON(!owner)) 3165 return -EINVAL; 3166 3167 group = iommu_group_get(dev); 3168 if (!group) 3169 return -ENODEV; 3170 3171 mutex_lock(&group->mutex); 3172 if (group->owner_cnt) { 3173 if (group->owner != owner) { 3174 ret = -EPERM; 3175 goto unlock_out; 3176 } 3177 group->owner_cnt++; 3178 goto unlock_out; 3179 } 3180 3181 ret = __iommu_take_dma_ownership(group, owner); 3182 unlock_out: 3183 mutex_unlock(&group->mutex); 3184 iommu_group_put(group); 3185 3186 return ret; 3187 } 3188 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3189 3190 static void __iommu_release_dma_ownership(struct iommu_group *group) 3191 { 3192 if (WARN_ON(!group->owner_cnt || !group->owner || 3193 !xa_empty(&group->pasid_array))) 3194 return; 3195 3196 group->owner_cnt = 0; 3197 group->owner = NULL; 3198 __iommu_group_set_domain_nofail(group, group->default_domain); 3199 } 3200 3201 /** 3202 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3203 * @group: The group. 3204 * 3205 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3206 */ 3207 void iommu_group_release_dma_owner(struct iommu_group *group) 3208 { 3209 mutex_lock(&group->mutex); 3210 __iommu_release_dma_ownership(group); 3211 mutex_unlock(&group->mutex); 3212 } 3213 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3214 3215 /** 3216 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3217 * @dev: The device.
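 *
 * Illustrative pairing with iommu_device_claim_dma_owner() (the owner cookie
 * is arbitrary; callers usually pass a pointer identifying the user context):
 *
 *	ret = iommu_device_claim_dma_owner(dev, my_ctx);
 *	if (ret)
 *		return ret;
 *	... user-controlled DMA via an UNMANAGED domain ...
 *	iommu_device_release_dma_owner(dev);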
3218 * 3219 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3220 */ 3221 void iommu_device_release_dma_owner(struct device *dev) 3222 { 3223 struct iommu_group *group = iommu_group_get(dev); 3224 3225 mutex_lock(&group->mutex); 3226 if (group->owner_cnt > 1) 3227 group->owner_cnt--; 3228 else 3229 __iommu_release_dma_ownership(group); 3230 mutex_unlock(&group->mutex); 3231 iommu_group_put(group); 3232 } 3233 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3234 3235 /** 3236 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3237 * @group: The group. 3238 * 3239 * This provides status query on a given group. It is racy and only for 3240 * non-binding status reporting. 3241 */ 3242 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3243 { 3244 unsigned int user; 3245 3246 mutex_lock(&group->mutex); 3247 user = group->owner_cnt; 3248 mutex_unlock(&group->mutex); 3249 3250 return user; 3251 } 3252 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3253 3254 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3255 struct iommu_group *group, ioasid_t pasid) 3256 { 3257 struct group_device *device; 3258 int ret = 0; 3259 3260 for_each_group_device(group, device) { 3261 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3262 if (ret) 3263 break; 3264 } 3265 3266 return ret; 3267 } 3268 3269 static void __iommu_remove_group_pasid(struct iommu_group *group, 3270 ioasid_t pasid) 3271 { 3272 struct group_device *device; 3273 const struct iommu_ops *ops; 3274 3275 for_each_group_device(group, device) { 3276 ops = dev_iommu_ops(device->dev); 3277 ops->remove_dev_pasid(device->dev, pasid); 3278 } 3279 } 3280 3281 /* 3282 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3283 * @domain: the iommu domain. 3284 * @dev: the attached device. 3285 * @pasid: the pasid of the device. 3286 * 3287 * Return: 0 on success, or an error. 3288 */ 3289 int iommu_attach_device_pasid(struct iommu_domain *domain, 3290 struct device *dev, ioasid_t pasid) 3291 { 3292 struct iommu_group *group; 3293 void *curr; 3294 int ret; 3295 3296 if (!domain->ops->set_dev_pasid) 3297 return -EOPNOTSUPP; 3298 3299 group = iommu_group_get(dev); 3300 if (!group) 3301 return -ENODEV; 3302 3303 mutex_lock(&group->mutex); 3304 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3305 if (curr) { 3306 ret = xa_err(curr) ? : -EBUSY; 3307 goto out_unlock; 3308 } 3309 3310 ret = __iommu_set_group_pasid(domain, group, pasid); 3311 if (ret) { 3312 __iommu_remove_group_pasid(group, pasid); 3313 xa_erase(&group->pasid_array, pasid); 3314 } 3315 out_unlock: 3316 mutex_unlock(&group->mutex); 3317 iommu_group_put(group); 3318 3319 return ret; 3320 } 3321 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3322 3323 /* 3324 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3325 * @domain: the iommu domain. 3326 * @dev: the attached device. 3327 * @pasid: the pasid of the device. 3328 * 3329 * The @domain must have been attached to @pasid of the @dev with 3330 * iommu_attach_device_pasid(). 
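 *
 * Illustrative sketch (the pasid is assumed to have been allocated by the
 * caller, e.g. through the SVA machinery):
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret)
 *		return ret;
 *	... DMA tagged with pasid now hits domain's mappings ...
 *	iommu_detach_device_pasid(domain, dev, pasid);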
3331 */ 3332 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3333 ioasid_t pasid) 3334 { 3335 struct iommu_group *group = iommu_group_get(dev); 3336 3337 mutex_lock(&group->mutex); 3338 __iommu_remove_group_pasid(group, pasid); 3339 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3340 mutex_unlock(&group->mutex); 3341 3342 iommu_group_put(group); 3343 } 3344 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3345 3346 /* 3347 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3348 * @dev: the queried device 3349 * @pasid: the pasid of the device 3350 * @type: matched domain type, 0 for any match 3351 * 3352 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3353 * domain attached to pasid of a device. Callers must hold a lock around this 3354 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3355 * type is being manipulated. This API does not internally resolve races with 3356 * attach/detach. 3357 * 3358 * Return: attached domain on success, NULL otherwise. 3359 */ 3360 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3361 ioasid_t pasid, 3362 unsigned int type) 3363 { 3364 struct iommu_domain *domain; 3365 struct iommu_group *group; 3366 3367 group = iommu_group_get(dev); 3368 if (!group) 3369 return NULL; 3370 3371 xa_lock(&group->pasid_array); 3372 domain = xa_load(&group->pasid_array, pasid); 3373 if (type && domain && domain->type != type) 3374 domain = ERR_PTR(-EBUSY); 3375 xa_unlock(&group->pasid_array); 3376 iommu_group_put(group); 3377 3378 return domain; 3379 } 3380 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3381 3382 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3383 struct mm_struct *mm) 3384 { 3385 const struct iommu_ops *ops = dev_iommu_ops(dev); 3386 struct iommu_domain *domain; 3387 3388 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3389 if (!domain) 3390 return NULL; 3391 3392 domain->type = IOMMU_DOMAIN_SVA; 3393 mmgrab(mm); 3394 domain->mm = mm; 3395 domain->iopf_handler = iommu_sva_handle_iopf; 3396 domain->fault_data = mm; 3397 3398 return domain; 3399 } 3400
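/*
 * Illustrative sketch of how an SVA domain is typically consumed (the mm and
 * pasid are whatever the caller already holds; the in-kernel SVA helpers are
 * the real user of this path):
 *
 *	struct iommu_domain *domain = iommu_sva_domain_alloc(dev, mm);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	if (ret)
 *		iommu_domain_free(domain);
 */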