1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/amba/bus.h> 10 #include <linux/device.h> 11 #include <linux/kernel.h> 12 #include <linux/bits.h> 13 #include <linux/bug.h> 14 #include <linux/types.h> 15 #include <linux/init.h> 16 #include <linux/export.h> 17 #include <linux/slab.h> 18 #include <linux/errno.h> 19 #include <linux/host1x_context_bus.h> 20 #include <linux/iommu.h> 21 #include <linux/idr.h> 22 #include <linux/err.h> 23 #include <linux/pci.h> 24 #include <linux/pci-ats.h> 25 #include <linux/bitops.h> 26 #include <linux/platform_device.h> 27 #include <linux/property.h> 28 #include <linux/fsl/mc.h> 29 #include <linux/module.h> 30 #include <linux/cc_platform.h> 31 #include <trace/events/iommu.h> 32 #include <linux/sched/mm.h> 33 #include <linux/msi.h> 34 35 #include "dma-iommu.h" 36 37 #include "iommu-sva.h" 38 39 static struct kset *iommu_group_kset; 40 static DEFINE_IDA(iommu_group_ida); 41 42 static unsigned int iommu_def_domain_type __read_mostly; 43 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); 44 static u32 iommu_cmd_line __read_mostly; 45 46 struct iommu_group { 47 struct kobject kobj; 48 struct kobject *devices_kobj; 49 struct list_head devices; 50 struct xarray pasid_array; 51 struct mutex mutex; 52 void *iommu_data; 53 void (*iommu_data_release)(void *iommu_data); 54 char *name; 55 int id; 56 struct iommu_domain *default_domain; 57 struct iommu_domain *blocking_domain; 58 struct iommu_domain *domain; 59 struct list_head entry; 60 unsigned int owner_cnt; 61 void *owner; 62 }; 63 64 struct group_device { 65 struct list_head list; 66 struct device *dev; 67 char *name; 68 }; 69 70 struct iommu_group_attribute { 71 struct attribute attr; 72 ssize_t (*show)(struct iommu_group *group, char *buf); 73 ssize_t (*store)(struct iommu_group *group, 74 const char *buf, size_t count); 75 }; 76 77 static const char * const iommu_group_resv_type_string[] = { 78 [IOMMU_RESV_DIRECT] = "direct", 79 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 80 [IOMMU_RESV_RESERVED] = "reserved", 81 [IOMMU_RESV_MSI] = "msi", 82 [IOMMU_RESV_SW_MSI] = "msi", 83 }; 84 85 #define IOMMU_CMD_LINE_DMA_API BIT(0) 86 #define IOMMU_CMD_LINE_STRICT BIT(1) 87 88 static int iommu_bus_notifier(struct notifier_block *nb, 89 unsigned long action, void *data); 90 static int iommu_alloc_default_domain(struct iommu_group *group, 91 struct device *dev); 92 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 93 unsigned type); 94 static int __iommu_attach_device(struct iommu_domain *domain, 95 struct device *dev); 96 static int __iommu_attach_group(struct iommu_domain *domain, 97 struct iommu_group *group); 98 static int __iommu_group_set_domain(struct iommu_group *group, 99 struct iommu_domain *new_domain); 100 static int iommu_create_device_direct_mappings(struct iommu_group *group, 101 struct device *dev); 102 static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 103 static ssize_t iommu_group_store_type(struct iommu_group *group, 104 const char *buf, size_t count); 105 106 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 107 struct iommu_group_attribute iommu_group_attr_##_name = \ 108 __ATTR(_name, _mode, _show, _store) 109 110 #define to_iommu_group_attr(_attr) \ 111 container_of(_attr, struct iommu_group_attribute, attr) 112 #define to_iommu_group(_kobj) \ 113 
container_of(_kobj, struct iommu_group, kobj) 114 115 static LIST_HEAD(iommu_device_list); 116 static DEFINE_SPINLOCK(iommu_device_lock); 117 118 static struct bus_type * const iommu_buses[] = { 119 &platform_bus_type, 120 #ifdef CONFIG_PCI 121 &pci_bus_type, 122 #endif 123 #ifdef CONFIG_ARM_AMBA 124 &amba_bustype, 125 #endif 126 #ifdef CONFIG_FSL_MC_BUS 127 &fsl_mc_bus_type, 128 #endif 129 #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS 130 &host1x_context_device_bus_type, 131 #endif 132 }; 133 134 /* 135 * Use a function instead of an array here because the domain-type is a 136 * bit-field, so an array would waste memory. 137 */ 138 static const char *iommu_domain_type_str(unsigned int t) 139 { 140 switch (t) { 141 case IOMMU_DOMAIN_BLOCKED: 142 return "Blocked"; 143 case IOMMU_DOMAIN_IDENTITY: 144 return "Passthrough"; 145 case IOMMU_DOMAIN_UNMANAGED: 146 return "Unmanaged"; 147 case IOMMU_DOMAIN_DMA: 148 case IOMMU_DOMAIN_DMA_FQ: 149 return "Translated"; 150 default: 151 return "Unknown"; 152 } 153 } 154 155 static int __init iommu_subsys_init(void) 156 { 157 struct notifier_block *nb; 158 159 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 160 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 161 iommu_set_default_passthrough(false); 162 else 163 iommu_set_default_translated(false); 164 165 if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { 166 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 167 iommu_set_default_translated(false); 168 } 169 } 170 171 if (!iommu_default_passthrough() && !iommu_dma_strict) 172 iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; 173 174 pr_info("Default domain type: %s %s\n", 175 iommu_domain_type_str(iommu_def_domain_type), 176 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 177 "(set via kernel command line)" : ""); 178 179 if (!iommu_default_passthrough()) 180 pr_info("DMA domain TLB invalidation policy: %s mode %s\n", 181 iommu_dma_strict ? "strict" : "lazy", 182 (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 183 "(set via kernel command line)" : ""); 184 185 nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); 186 if (!nb) 187 return -ENOMEM; 188 189 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 190 nb[i].notifier_call = iommu_bus_notifier; 191 bus_register_notifier(iommu_buses[i], &nb[i]); 192 } 193 194 return 0; 195 } 196 subsys_initcall(iommu_subsys_init); 197 198 static int remove_iommu_group(struct device *dev, void *data) 199 { 200 if (dev->iommu && dev->iommu->iommu_dev == data) 201 iommu_release_device(dev); 202 203 return 0; 204 } 205 206 /** 207 * iommu_device_register() - Register an IOMMU hardware instance 208 * @iommu: IOMMU handle for the instance 209 * @ops: IOMMU ops to associate with the instance 210 * @hwdev: (optional) actual instance device, used for fwnode lookup 211 * 212 * Return: 0 on success, or an error. 213 */ 214 int iommu_device_register(struct iommu_device *iommu, 215 const struct iommu_ops *ops, struct device *hwdev) 216 { 217 int err = 0; 218 219 /* We need to be able to take module references appropriately */ 220 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 221 return -EINVAL; 222 /* 223 * Temporarily enforce global restriction to a single driver. This was 224 * already the de-facto behaviour, since any possible combination of 225 * existing drivers would compete for at least the PCI or platform bus. 
 */
	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
		return -EBUSY;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = dev_fwnode(hwdev);

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
	if (err)
		iommu_device_unregister(iommu);
	return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	static DEFINE_MUTEX(iommu_probe_device_lock);
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
321 */ 322 mutex_lock(&iommu_probe_device_lock); 323 if (!dev_iommu_get(dev)) { 324 ret = -ENOMEM; 325 goto err_unlock; 326 } 327 328 if (!try_module_get(ops->owner)) { 329 ret = -EINVAL; 330 goto err_free; 331 } 332 333 iommu_dev = ops->probe_device(dev); 334 if (IS_ERR(iommu_dev)) { 335 ret = PTR_ERR(iommu_dev); 336 goto out_module_put; 337 } 338 339 dev->iommu->iommu_dev = iommu_dev; 340 dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); 341 342 group = iommu_group_get_for_dev(dev); 343 if (IS_ERR(group)) { 344 ret = PTR_ERR(group); 345 goto out_release; 346 } 347 348 mutex_lock(&group->mutex); 349 if (group_list && !group->default_domain && list_empty(&group->entry)) 350 list_add_tail(&group->entry, group_list); 351 mutex_unlock(&group->mutex); 352 iommu_group_put(group); 353 354 mutex_unlock(&iommu_probe_device_lock); 355 iommu_device_link(iommu_dev, dev); 356 357 return 0; 358 359 out_release: 360 if (ops->release_device) 361 ops->release_device(dev); 362 363 out_module_put: 364 module_put(ops->owner); 365 366 err_free: 367 dev_iommu_free(dev); 368 369 err_unlock: 370 mutex_unlock(&iommu_probe_device_lock); 371 372 return ret; 373 } 374 375 static bool iommu_is_attach_deferred(struct device *dev) 376 { 377 const struct iommu_ops *ops = dev_iommu_ops(dev); 378 379 if (ops->is_attach_deferred) 380 return ops->is_attach_deferred(dev); 381 382 return false; 383 } 384 385 static int iommu_group_do_dma_first_attach(struct device *dev, void *data) 386 { 387 struct iommu_domain *domain = data; 388 389 lockdep_assert_held(&dev->iommu_group->mutex); 390 391 if (iommu_is_attach_deferred(dev)) { 392 dev->iommu->attach_deferred = 1; 393 return 0; 394 } 395 396 return __iommu_attach_device(domain, dev); 397 } 398 399 int iommu_probe_device(struct device *dev) 400 { 401 const struct iommu_ops *ops; 402 struct iommu_group *group; 403 int ret; 404 405 ret = __iommu_probe_device(dev, NULL); 406 if (ret) 407 goto err_out; 408 409 group = iommu_group_get(dev); 410 if (!group) { 411 ret = -ENODEV; 412 goto err_release; 413 } 414 415 /* 416 * Try to allocate a default domain - needs support from the 417 * IOMMU driver. There are still some drivers which don't 418 * support default domains, so the return value is not yet 419 * checked. 420 */ 421 mutex_lock(&group->mutex); 422 iommu_alloc_default_domain(group, dev); 423 424 /* 425 * If device joined an existing group which has been claimed, don't 426 * attach the default domain. 427 */ 428 if (group->default_domain && !group->owner) { 429 ret = iommu_group_do_dma_first_attach(dev, group->default_domain); 430 if (ret) { 431 mutex_unlock(&group->mutex); 432 iommu_group_put(group); 433 goto err_release; 434 } 435 } 436 437 iommu_create_device_direct_mappings(group, dev); 438 439 mutex_unlock(&group->mutex); 440 iommu_group_put(group); 441 442 ops = dev_iommu_ops(dev); 443 if (ops->probe_finalize) 444 ops->probe_finalize(dev); 445 446 return 0; 447 448 err_release: 449 iommu_release_device(dev); 450 451 err_out: 452 return ret; 453 454 } 455 456 /* 457 * Remove a device from a group's device list and return the group device 458 * if successful. 
 */
static struct group_device *
__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *device;

	lockdep_assert_held(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		if (device->dev == dev) {
			list_del(&device->list);
			return device;
		}
	}

	return NULL;
}

/*
 * Release a device from its group and decrement the iommu group reference
 * count.
 */
static void __iommu_group_release_device(struct iommu_group *group,
					 struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(grp_dev->name);
	kfree(grp_dev);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}

void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;

	if (!dev->iommu || !group)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	mutex_lock(&group->mutex);
	device = __iommu_group_remove_device(group, dev);

	/*
	 * If the group has become empty then ownership must have been released,
	 * and the current domain must be set back to NULL or the default
	 * domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group they are not affected
	 * by this callback.
	 *
	 * The IOMMU driver must set the device to either an identity or
	 * blocking translation and stop using any domain pointer, as it is
	 * going to be freed.
527 */ 528 ops = dev_iommu_ops(dev); 529 if (ops->release_device) 530 ops->release_device(dev); 531 mutex_unlock(&group->mutex); 532 533 if (device) 534 __iommu_group_release_device(group, device); 535 536 module_put(ops->owner); 537 dev_iommu_free(dev); 538 } 539 540 static int __init iommu_set_def_domain_type(char *str) 541 { 542 bool pt; 543 int ret; 544 545 ret = kstrtobool(str, &pt); 546 if (ret) 547 return ret; 548 549 if (pt) 550 iommu_set_default_passthrough(true); 551 else 552 iommu_set_default_translated(true); 553 554 return 0; 555 } 556 early_param("iommu.passthrough", iommu_set_def_domain_type); 557 558 static int __init iommu_dma_setup(char *str) 559 { 560 int ret = kstrtobool(str, &iommu_dma_strict); 561 562 if (!ret) 563 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 564 return ret; 565 } 566 early_param("iommu.strict", iommu_dma_setup); 567 568 void iommu_set_dma_strict(void) 569 { 570 iommu_dma_strict = true; 571 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) 572 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 573 } 574 575 static ssize_t iommu_group_attr_show(struct kobject *kobj, 576 struct attribute *__attr, char *buf) 577 { 578 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 579 struct iommu_group *group = to_iommu_group(kobj); 580 ssize_t ret = -EIO; 581 582 if (attr->show) 583 ret = attr->show(group, buf); 584 return ret; 585 } 586 587 static ssize_t iommu_group_attr_store(struct kobject *kobj, 588 struct attribute *__attr, 589 const char *buf, size_t count) 590 { 591 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 592 struct iommu_group *group = to_iommu_group(kobj); 593 ssize_t ret = -EIO; 594 595 if (attr->store) 596 ret = attr->store(group, buf, count); 597 return ret; 598 } 599 600 static const struct sysfs_ops iommu_group_sysfs_ops = { 601 .show = iommu_group_attr_show, 602 .store = iommu_group_attr_store, 603 }; 604 605 static int iommu_group_create_file(struct iommu_group *group, 606 struct iommu_group_attribute *attr) 607 { 608 return sysfs_create_file(&group->kobj, &attr->attr); 609 } 610 611 static void iommu_group_remove_file(struct iommu_group *group, 612 struct iommu_group_attribute *attr) 613 { 614 sysfs_remove_file(&group->kobj, &attr->attr); 615 } 616 617 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 618 { 619 return sprintf(buf, "%s\n", group->name); 620 } 621 622 /** 623 * iommu_insert_resv_region - Insert a new region in the 624 * list of reserved regions. 625 * @new: new region to insert 626 * @regions: list of regions 627 * 628 * Elements are sorted by start address and overlapping segments 629 * of the same type are merged. 
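 *
 * As an illustrative sketch (addresses made up, not taken from real
 * hardware): inserting a direct region spanning [0x1000 - 0x2fff] into a
 * list that already holds a direct region [0x0000 - 0x1fff] leaves a
 * single merged IOMMU_RESV_DIRECT entry covering [0x0000 - 0x2fff],
 * whereas an overlapping region of a different type (e.g. IOMMU_RESV_MSI)
 * is kept as a separate entry.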
630 */ 631 static int iommu_insert_resv_region(struct iommu_resv_region *new, 632 struct list_head *regions) 633 { 634 struct iommu_resv_region *iter, *tmp, *nr, *top; 635 LIST_HEAD(stack); 636 637 nr = iommu_alloc_resv_region(new->start, new->length, 638 new->prot, new->type, GFP_KERNEL); 639 if (!nr) 640 return -ENOMEM; 641 642 /* First add the new element based on start address sorting */ 643 list_for_each_entry(iter, regions, list) { 644 if (nr->start < iter->start || 645 (nr->start == iter->start && nr->type <= iter->type)) 646 break; 647 } 648 list_add_tail(&nr->list, &iter->list); 649 650 /* Merge overlapping segments of type nr->type in @regions, if any */ 651 list_for_each_entry_safe(iter, tmp, regions, list) { 652 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 653 654 /* no merge needed on elements of different types than @new */ 655 if (iter->type != new->type) { 656 list_move_tail(&iter->list, &stack); 657 continue; 658 } 659 660 /* look for the last stack element of same type as @iter */ 661 list_for_each_entry_reverse(top, &stack, list) 662 if (top->type == iter->type) 663 goto check_overlap; 664 665 list_move_tail(&iter->list, &stack); 666 continue; 667 668 check_overlap: 669 top_end = top->start + top->length - 1; 670 671 if (iter->start > top_end + 1) { 672 list_move_tail(&iter->list, &stack); 673 } else { 674 top->length = max(top_end, iter_end) - top->start + 1; 675 list_del(&iter->list); 676 kfree(iter); 677 } 678 } 679 list_splice(&stack, regions); 680 return 0; 681 } 682 683 static int 684 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 685 struct list_head *group_resv_regions) 686 { 687 struct iommu_resv_region *entry; 688 int ret = 0; 689 690 list_for_each_entry(entry, dev_resv_regions, list) { 691 ret = iommu_insert_resv_region(entry, group_resv_regions); 692 if (ret) 693 break; 694 } 695 return ret; 696 } 697 698 int iommu_get_group_resv_regions(struct iommu_group *group, 699 struct list_head *head) 700 { 701 struct group_device *device; 702 int ret = 0; 703 704 mutex_lock(&group->mutex); 705 list_for_each_entry(device, &group->devices, list) { 706 struct list_head dev_resv_regions; 707 708 /* 709 * Non-API groups still expose reserved_regions in sysfs, 710 * so filter out calls that get here that way. 
711 */ 712 if (!device->dev->iommu) 713 break; 714 715 INIT_LIST_HEAD(&dev_resv_regions); 716 iommu_get_resv_regions(device->dev, &dev_resv_regions); 717 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 718 iommu_put_resv_regions(device->dev, &dev_resv_regions); 719 if (ret) 720 break; 721 } 722 mutex_unlock(&group->mutex); 723 return ret; 724 } 725 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 726 727 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 728 char *buf) 729 { 730 struct iommu_resv_region *region, *next; 731 struct list_head group_resv_regions; 732 char *str = buf; 733 734 INIT_LIST_HEAD(&group_resv_regions); 735 iommu_get_group_resv_regions(group, &group_resv_regions); 736 737 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 738 str += sprintf(str, "0x%016llx 0x%016llx %s\n", 739 (long long int)region->start, 740 (long long int)(region->start + 741 region->length - 1), 742 iommu_group_resv_type_string[region->type]); 743 kfree(region); 744 } 745 746 return (str - buf); 747 } 748 749 static ssize_t iommu_group_show_type(struct iommu_group *group, 750 char *buf) 751 { 752 char *type = "unknown\n"; 753 754 mutex_lock(&group->mutex); 755 if (group->default_domain) { 756 switch (group->default_domain->type) { 757 case IOMMU_DOMAIN_BLOCKED: 758 type = "blocked\n"; 759 break; 760 case IOMMU_DOMAIN_IDENTITY: 761 type = "identity\n"; 762 break; 763 case IOMMU_DOMAIN_UNMANAGED: 764 type = "unmanaged\n"; 765 break; 766 case IOMMU_DOMAIN_DMA: 767 type = "DMA\n"; 768 break; 769 case IOMMU_DOMAIN_DMA_FQ: 770 type = "DMA-FQ\n"; 771 break; 772 } 773 } 774 mutex_unlock(&group->mutex); 775 strcpy(buf, type); 776 777 return strlen(type); 778 } 779 780 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 781 782 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 783 iommu_group_show_resv_regions, NULL); 784 785 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 786 iommu_group_store_type); 787 788 static void iommu_group_release(struct kobject *kobj) 789 { 790 struct iommu_group *group = to_iommu_group(kobj); 791 792 pr_debug("Releasing group %d\n", group->id); 793 794 if (group->iommu_data_release) 795 group->iommu_data_release(group->iommu_data); 796 797 ida_free(&iommu_group_ida, group->id); 798 799 if (group->default_domain) 800 iommu_domain_free(group->default_domain); 801 if (group->blocking_domain) 802 iommu_domain_free(group->blocking_domain); 803 804 kfree(group->name); 805 kfree(group); 806 } 807 808 static const struct kobj_type iommu_group_ktype = { 809 .sysfs_ops = &iommu_group_sysfs_ops, 810 .release = iommu_group_release, 811 }; 812 813 /** 814 * iommu_group_alloc - Allocate a new group 815 * 816 * This function is called by an iommu driver to allocate a new iommu 817 * group. The iommu group represents the minimum granularity of the iommu. 818 * Upon successful return, the caller holds a reference to the supplied 819 * group in order to hold the group until devices are added. Use 820 * iommu_group_put() to release this extra reference count, allowing the 821 * group to be automatically reclaimed once it has no devices or external 822 * references. 
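 *
 * A minimal driver-side sketch (hypothetical caller, error handling
 * trimmed; the "my-unit" name is invented):
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_group_set_name(group, "my-unit");
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 *
 * The final iommu_group_put() drops only the allocation reference; the
 * group stays alive via the reference taken by iommu_group_add_device().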
823 */ 824 struct iommu_group *iommu_group_alloc(void) 825 { 826 struct iommu_group *group; 827 int ret; 828 829 group = kzalloc(sizeof(*group), GFP_KERNEL); 830 if (!group) 831 return ERR_PTR(-ENOMEM); 832 833 group->kobj.kset = iommu_group_kset; 834 mutex_init(&group->mutex); 835 INIT_LIST_HEAD(&group->devices); 836 INIT_LIST_HEAD(&group->entry); 837 xa_init(&group->pasid_array); 838 839 ret = ida_alloc(&iommu_group_ida, GFP_KERNEL); 840 if (ret < 0) { 841 kfree(group); 842 return ERR_PTR(ret); 843 } 844 group->id = ret; 845 846 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 847 NULL, "%d", group->id); 848 if (ret) { 849 kobject_put(&group->kobj); 850 return ERR_PTR(ret); 851 } 852 853 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 854 if (!group->devices_kobj) { 855 kobject_put(&group->kobj); /* triggers .release & free */ 856 return ERR_PTR(-ENOMEM); 857 } 858 859 /* 860 * The devices_kobj holds a reference on the group kobject, so 861 * as long as that exists so will the group. We can therefore 862 * use the devices_kobj for reference counting. 863 */ 864 kobject_put(&group->kobj); 865 866 ret = iommu_group_create_file(group, 867 &iommu_group_attr_reserved_regions); 868 if (ret) { 869 kobject_put(group->devices_kobj); 870 return ERR_PTR(ret); 871 } 872 873 ret = iommu_group_create_file(group, &iommu_group_attr_type); 874 if (ret) { 875 kobject_put(group->devices_kobj); 876 return ERR_PTR(ret); 877 } 878 879 pr_debug("Allocated group %d\n", group->id); 880 881 return group; 882 } 883 EXPORT_SYMBOL_GPL(iommu_group_alloc); 884 885 struct iommu_group *iommu_group_get_by_id(int id) 886 { 887 struct kobject *group_kobj; 888 struct iommu_group *group; 889 const char *name; 890 891 if (!iommu_group_kset) 892 return NULL; 893 894 name = kasprintf(GFP_KERNEL, "%d", id); 895 if (!name) 896 return NULL; 897 898 group_kobj = kset_find_obj(iommu_group_kset, name); 899 kfree(name); 900 901 if (!group_kobj) 902 return NULL; 903 904 group = container_of(group_kobj, struct iommu_group, kobj); 905 BUG_ON(group->id != id); 906 907 kobject_get(group->devices_kobj); 908 kobject_put(&group->kobj); 909 910 return group; 911 } 912 EXPORT_SYMBOL_GPL(iommu_group_get_by_id); 913 914 /** 915 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 916 * @group: the group 917 * 918 * iommu drivers can store data in the group for use when doing iommu 919 * operations. This function provides a way to retrieve it. Caller 920 * should hold a group reference. 921 */ 922 void *iommu_group_get_iommudata(struct iommu_group *group) 923 { 924 return group->iommu_data; 925 } 926 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 927 928 /** 929 * iommu_group_set_iommudata - set iommu_data for a group 930 * @group: the group 931 * @iommu_data: new data 932 * @release: release function for iommu_data 933 * 934 * iommu drivers can store data in the group for use when doing iommu 935 * operations. This function provides a way to set the data after 936 * the group has been allocated. Caller should hold a group reference. 937 */ 938 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 939 void (*release)(void *iommu_data)) 940 { 941 group->iommu_data = iommu_data; 942 group->iommu_data_release = release; 943 } 944 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 945 946 /** 947 * iommu_group_set_name - set name for a group 948 * @group: the group 949 * @name: name 950 * 951 * Allow iommu driver to set a name for a group. 
When set it will 952 * appear in a name attribute file under the group in sysfs. 953 */ 954 int iommu_group_set_name(struct iommu_group *group, const char *name) 955 { 956 int ret; 957 958 if (group->name) { 959 iommu_group_remove_file(group, &iommu_group_attr_name); 960 kfree(group->name); 961 group->name = NULL; 962 if (!name) 963 return 0; 964 } 965 966 group->name = kstrdup(name, GFP_KERNEL); 967 if (!group->name) 968 return -ENOMEM; 969 970 ret = iommu_group_create_file(group, &iommu_group_attr_name); 971 if (ret) { 972 kfree(group->name); 973 group->name = NULL; 974 return ret; 975 } 976 977 return 0; 978 } 979 EXPORT_SYMBOL_GPL(iommu_group_set_name); 980 981 static int iommu_create_device_direct_mappings(struct iommu_group *group, 982 struct device *dev) 983 { 984 struct iommu_domain *domain = group->default_domain; 985 struct iommu_resv_region *entry; 986 struct list_head mappings; 987 unsigned long pg_size; 988 int ret = 0; 989 990 if (!domain || !iommu_is_dma_domain(domain)) 991 return 0; 992 993 BUG_ON(!domain->pgsize_bitmap); 994 995 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 996 INIT_LIST_HEAD(&mappings); 997 998 iommu_get_resv_regions(dev, &mappings); 999 1000 /* We need to consider overlapping regions for different devices */ 1001 list_for_each_entry(entry, &mappings, list) { 1002 dma_addr_t start, end, addr; 1003 size_t map_size = 0; 1004 1005 start = ALIGN(entry->start, pg_size); 1006 end = ALIGN(entry->start + entry->length, pg_size); 1007 1008 if (entry->type != IOMMU_RESV_DIRECT && 1009 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 1010 continue; 1011 1012 for (addr = start; addr <= end; addr += pg_size) { 1013 phys_addr_t phys_addr; 1014 1015 if (addr == end) 1016 goto map_end; 1017 1018 phys_addr = iommu_iova_to_phys(domain, addr); 1019 if (!phys_addr) { 1020 map_size += pg_size; 1021 continue; 1022 } 1023 1024 map_end: 1025 if (map_size) { 1026 ret = iommu_map(domain, addr - map_size, 1027 addr - map_size, map_size, 1028 entry->prot, GFP_KERNEL); 1029 if (ret) 1030 goto out; 1031 map_size = 0; 1032 } 1033 } 1034 1035 } 1036 1037 iommu_flush_iotlb_all(domain); 1038 1039 out: 1040 iommu_put_resv_regions(dev, &mappings); 1041 1042 return ret; 1043 } 1044 1045 /** 1046 * iommu_group_add_device - add a device to an iommu group 1047 * @group: the group into which to add the device (reference should be held) 1048 * @dev: the device 1049 * 1050 * This function is called by an iommu driver to add a device into a 1051 * group. Adding a device increments the group reference count. 1052 */ 1053 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 1054 { 1055 int ret, i = 0; 1056 struct group_device *device; 1057 1058 device = kzalloc(sizeof(*device), GFP_KERNEL); 1059 if (!device) 1060 return -ENOMEM; 1061 1062 device->dev = dev; 1063 1064 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 1065 if (ret) 1066 goto err_free_device; 1067 1068 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 1069 rename: 1070 if (!device->name) { 1071 ret = -ENOMEM; 1072 goto err_remove_link; 1073 } 1074 1075 ret = sysfs_create_link_nowarn(group->devices_kobj, 1076 &dev->kobj, device->name); 1077 if (ret) { 1078 if (ret == -EEXIST && i >= 0) { 1079 /* 1080 * Account for the slim chance of collision 1081 * and append an instance to the name. 
1082 */ 1083 kfree(device->name); 1084 device->name = kasprintf(GFP_KERNEL, "%s.%d", 1085 kobject_name(&dev->kobj), i++); 1086 goto rename; 1087 } 1088 goto err_free_name; 1089 } 1090 1091 kobject_get(group->devices_kobj); 1092 1093 dev->iommu_group = group; 1094 1095 mutex_lock(&group->mutex); 1096 list_add_tail(&device->list, &group->devices); 1097 if (group->domain) 1098 ret = iommu_group_do_dma_first_attach(dev, group->domain); 1099 mutex_unlock(&group->mutex); 1100 if (ret) 1101 goto err_put_group; 1102 1103 trace_add_device_to_group(group->id, dev); 1104 1105 dev_info(dev, "Adding to iommu group %d\n", group->id); 1106 1107 return 0; 1108 1109 err_put_group: 1110 mutex_lock(&group->mutex); 1111 list_del(&device->list); 1112 mutex_unlock(&group->mutex); 1113 dev->iommu_group = NULL; 1114 kobject_put(group->devices_kobj); 1115 sysfs_remove_link(group->devices_kobj, device->name); 1116 err_free_name: 1117 kfree(device->name); 1118 err_remove_link: 1119 sysfs_remove_link(&dev->kobj, "iommu_group"); 1120 err_free_device: 1121 kfree(device); 1122 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 1123 return ret; 1124 } 1125 EXPORT_SYMBOL_GPL(iommu_group_add_device); 1126 1127 /** 1128 * iommu_group_remove_device - remove a device from it's current group 1129 * @dev: device to be removed 1130 * 1131 * This function is called by an iommu driver to remove the device from 1132 * it's current group. This decrements the iommu group reference count. 1133 */ 1134 void iommu_group_remove_device(struct device *dev) 1135 { 1136 struct iommu_group *group = dev->iommu_group; 1137 struct group_device *device; 1138 1139 if (!group) 1140 return; 1141 1142 dev_info(dev, "Removing from iommu group %d\n", group->id); 1143 1144 mutex_lock(&group->mutex); 1145 device = __iommu_group_remove_device(group, dev); 1146 mutex_unlock(&group->mutex); 1147 1148 if (device) 1149 __iommu_group_release_device(group, device); 1150 } 1151 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 1152 1153 static int iommu_group_device_count(struct iommu_group *group) 1154 { 1155 struct group_device *entry; 1156 int ret = 0; 1157 1158 list_for_each_entry(entry, &group->devices, list) 1159 ret++; 1160 1161 return ret; 1162 } 1163 1164 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, 1165 int (*fn)(struct device *, void *)) 1166 { 1167 struct group_device *device; 1168 int ret = 0; 1169 1170 list_for_each_entry(device, &group->devices, list) { 1171 ret = fn(device->dev, data); 1172 if (ret) 1173 break; 1174 } 1175 return ret; 1176 } 1177 1178 /** 1179 * iommu_group_for_each_dev - iterate over each device in the group 1180 * @group: the group 1181 * @data: caller opaque data to be passed to callback function 1182 * @fn: caller supplied callback function 1183 * 1184 * This function is called by group users to iterate over group devices. 1185 * Callers should hold a reference count to the group during callback. 1186 * The group->mutex is held across callbacks, which will block calls to 1187 * iommu_group_add/remove_device. 
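 *
 * A small illustrative callback (hypothetical, not part of this file):
 *
 *	static int count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 * which could then be driven as
 * iommu_group_for_each_dev(group, &count, count_one); iteration stops
 * early only if the callback returns a non-zero value.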
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
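 *
 * A hedged consumer-side sketch (hypothetical handler; assumes the
 * iommu_dev_fault_handler_t prototype of a fault pointer plus the private
 * data pointer, as used by iommu_report_device_fault() below):
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// queue fault->prm for a worker, answer later via
 *		// iommu_page_response()
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler, ctx);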
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
1465 */ 1466 int iommu_group_id(struct iommu_group *group) 1467 { 1468 return group->id; 1469 } 1470 EXPORT_SYMBOL_GPL(iommu_group_id); 1471 1472 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1473 unsigned long *devfns); 1474 1475 /* 1476 * To consider a PCI device isolated, we require ACS to support Source 1477 * Validation, Request Redirection, Completer Redirection, and Upstream 1478 * Forwarding. This effectively means that devices cannot spoof their 1479 * requester ID, requests and completions cannot be redirected, and all 1480 * transactions are forwarded upstream, even as it passes through a 1481 * bridge where the target device is downstream. 1482 */ 1483 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1484 1485 /* 1486 * For multifunction devices which are not isolated from each other, find 1487 * all the other non-isolated functions and look for existing groups. For 1488 * each function, we also need to look for aliases to or from other devices 1489 * that may already have a group. 1490 */ 1491 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1492 unsigned long *devfns) 1493 { 1494 struct pci_dev *tmp = NULL; 1495 struct iommu_group *group; 1496 1497 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1498 return NULL; 1499 1500 for_each_pci_dev(tmp) { 1501 if (tmp == pdev || tmp->bus != pdev->bus || 1502 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1503 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1504 continue; 1505 1506 group = get_pci_alias_group(tmp, devfns); 1507 if (group) { 1508 pci_dev_put(tmp); 1509 return group; 1510 } 1511 } 1512 1513 return NULL; 1514 } 1515 1516 /* 1517 * Look for aliases to or from the given device for existing groups. DMA 1518 * aliases are only supported on the same bus, therefore the search 1519 * space is quite small (especially since we're really only looking at pcie 1520 * device, and therefore only expect multiple slots on the root complex or 1521 * downstream switch ports). It's conceivable though that a pair of 1522 * multifunction devices could have aliases between them that would cause a 1523 * loop. To prevent this, we use a bitmap to track where we've been. 1524 */ 1525 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1526 unsigned long *devfns) 1527 { 1528 struct pci_dev *tmp = NULL; 1529 struct iommu_group *group; 1530 1531 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1532 return NULL; 1533 1534 group = iommu_group_get(&pdev->dev); 1535 if (group) 1536 return group; 1537 1538 for_each_pci_dev(tmp) { 1539 if (tmp == pdev || tmp->bus != pdev->bus) 1540 continue; 1541 1542 /* We alias them or they alias us */ 1543 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1544 group = get_pci_alias_group(tmp, devfns); 1545 if (group) { 1546 pci_dev_put(tmp); 1547 return group; 1548 } 1549 1550 group = get_pci_function_alias_group(tmp, devfns); 1551 if (group) { 1552 pci_dev_put(tmp); 1553 return group; 1554 } 1555 } 1556 } 1557 1558 return NULL; 1559 } 1560 1561 struct group_for_pci_data { 1562 struct pci_dev *pdev; 1563 struct iommu_group *group; 1564 }; 1565 1566 /* 1567 * DMA alias iterator callback, return the last seen device. Stop and return 1568 * the IOMMU group if we find one along the way. 
1569 */ 1570 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1571 { 1572 struct group_for_pci_data *data = opaque; 1573 1574 data->pdev = pdev; 1575 data->group = iommu_group_get(&pdev->dev); 1576 1577 return data->group != NULL; 1578 } 1579 1580 /* 1581 * Generic device_group call-back function. It just allocates one 1582 * iommu-group per device. 1583 */ 1584 struct iommu_group *generic_device_group(struct device *dev) 1585 { 1586 return iommu_group_alloc(); 1587 } 1588 EXPORT_SYMBOL_GPL(generic_device_group); 1589 1590 /* 1591 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1592 * to find or create an IOMMU group for a device. 1593 */ 1594 struct iommu_group *pci_device_group(struct device *dev) 1595 { 1596 struct pci_dev *pdev = to_pci_dev(dev); 1597 struct group_for_pci_data data; 1598 struct pci_bus *bus; 1599 struct iommu_group *group = NULL; 1600 u64 devfns[4] = { 0 }; 1601 1602 if (WARN_ON(!dev_is_pci(dev))) 1603 return ERR_PTR(-EINVAL); 1604 1605 /* 1606 * Find the upstream DMA alias for the device. A device must not 1607 * be aliased due to topology in order to have its own IOMMU group. 1608 * If we find an alias along the way that already belongs to a 1609 * group, use it. 1610 */ 1611 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1612 return data.group; 1613 1614 pdev = data.pdev; 1615 1616 /* 1617 * Continue upstream from the point of minimum IOMMU granularity 1618 * due to aliases to the point where devices are protected from 1619 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1620 * group, use it. 1621 */ 1622 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { 1623 if (!bus->self) 1624 continue; 1625 1626 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 1627 break; 1628 1629 pdev = bus->self; 1630 1631 group = iommu_group_get(&pdev->dev); 1632 if (group) 1633 return group; 1634 } 1635 1636 /* 1637 * Look for existing groups on device aliases. If we alias another 1638 * device or another device aliases us, use the same group. 1639 */ 1640 group = get_pci_alias_group(pdev, (unsigned long *)devfns); 1641 if (group) 1642 return group; 1643 1644 /* 1645 * Look for existing groups on non-isolated functions on the same 1646 * slot and aliases of those funcions, if any. No need to clear 1647 * the search bitmap, the tested devfns are still valid. 
1648 */ 1649 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1650 if (group) 1651 return group; 1652 1653 /* No shared group found, allocate new */ 1654 return iommu_group_alloc(); 1655 } 1656 EXPORT_SYMBOL_GPL(pci_device_group); 1657 1658 /* Get the IOMMU group for device on fsl-mc bus */ 1659 struct iommu_group *fsl_mc_device_group(struct device *dev) 1660 { 1661 struct device *cont_dev = fsl_mc_cont_dev(dev); 1662 struct iommu_group *group; 1663 1664 group = iommu_group_get(cont_dev); 1665 if (!group) 1666 group = iommu_group_alloc(); 1667 return group; 1668 } 1669 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1670 1671 static int iommu_get_def_domain_type(struct device *dev) 1672 { 1673 const struct iommu_ops *ops = dev_iommu_ops(dev); 1674 1675 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 1676 return IOMMU_DOMAIN_DMA; 1677 1678 if (ops->def_domain_type) 1679 return ops->def_domain_type(dev); 1680 1681 return 0; 1682 } 1683 1684 static int iommu_group_alloc_default_domain(struct bus_type *bus, 1685 struct iommu_group *group, 1686 unsigned int type) 1687 { 1688 struct iommu_domain *dom; 1689 1690 dom = __iommu_domain_alloc(bus, type); 1691 if (!dom && type != IOMMU_DOMAIN_DMA) { 1692 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA); 1693 if (dom) 1694 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1695 type, group->name); 1696 } 1697 1698 if (!dom) 1699 return -ENOMEM; 1700 1701 group->default_domain = dom; 1702 if (!group->domain) 1703 group->domain = dom; 1704 return 0; 1705 } 1706 1707 static int iommu_alloc_default_domain(struct iommu_group *group, 1708 struct device *dev) 1709 { 1710 unsigned int type; 1711 1712 if (group->default_domain) 1713 return 0; 1714 1715 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type; 1716 1717 return iommu_group_alloc_default_domain(dev->bus, group, type); 1718 } 1719 1720 /** 1721 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1722 * @dev: target device 1723 * 1724 * This function is intended to be called by IOMMU drivers and extended to 1725 * support common, bus-defined algorithms when determining or creating the 1726 * IOMMU group for a device. On success, the caller will hold a reference 1727 * to the returned IOMMU group, which will already include the provided 1728 * device. The reference should be released with iommu_group_put(). 
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev  = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int __iommu_group_dma_first_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_first_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int
iommu_do_create_direct_mappings(struct device *dev, void *data) 1871 { 1872 struct iommu_group *group = data; 1873 1874 iommu_create_device_direct_mappings(group, dev); 1875 1876 return 0; 1877 } 1878 1879 static int iommu_group_create_direct_mappings(struct iommu_group *group) 1880 { 1881 return __iommu_group_for_each_dev(group, group, 1882 iommu_do_create_direct_mappings); 1883 } 1884 1885 int bus_iommu_probe(struct bus_type *bus) 1886 { 1887 struct iommu_group *group, *next; 1888 LIST_HEAD(group_list); 1889 int ret; 1890 1891 /* 1892 * This code-path does not allocate the default domain when 1893 * creating the iommu group, so do it after the groups are 1894 * created. 1895 */ 1896 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1897 if (ret) 1898 return ret; 1899 1900 list_for_each_entry_safe(group, next, &group_list, entry) { 1901 mutex_lock(&group->mutex); 1902 1903 /* Remove item from the list */ 1904 list_del_init(&group->entry); 1905 1906 /* Try to allocate default domain */ 1907 probe_alloc_default_domain(bus, group); 1908 1909 if (!group->default_domain) { 1910 mutex_unlock(&group->mutex); 1911 continue; 1912 } 1913 1914 iommu_group_create_direct_mappings(group); 1915 1916 ret = __iommu_group_dma_first_attach(group); 1917 1918 mutex_unlock(&group->mutex); 1919 1920 if (ret) 1921 break; 1922 1923 __iommu_group_dma_finalize(group); 1924 } 1925 1926 return ret; 1927 } 1928 1929 bool iommu_present(struct bus_type *bus) 1930 { 1931 return bus->iommu_ops != NULL; 1932 } 1933 EXPORT_SYMBOL_GPL(iommu_present); 1934 1935 /** 1936 * device_iommu_capable() - check for a general IOMMU capability 1937 * @dev: device to which the capability would be relevant, if available 1938 * @cap: IOMMU capability 1939 * 1940 * Return: true if an IOMMU is present and supports the given capability 1941 * for the given device, otherwise false. 1942 */ 1943 bool device_iommu_capable(struct device *dev, enum iommu_cap cap) 1944 { 1945 const struct iommu_ops *ops; 1946 1947 if (!dev->iommu || !dev->iommu->iommu_dev) 1948 return false; 1949 1950 ops = dev_iommu_ops(dev); 1951 if (!ops->capable) 1952 return false; 1953 1954 return ops->capable(dev, cap); 1955 } 1956 EXPORT_SYMBOL_GPL(device_iommu_capable); 1957 1958 /** 1959 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() 1960 * for a group 1961 * @group: Group to query 1962 * 1963 * IOMMU groups should not have differing values of 1964 * msi_device_has_isolated_msi() for devices in a group. However nothing 1965 * directly prevents this, so ensure mistakes don't result in isolation failures 1966 * by checking that all the devices are the same. 1967 */ 1968 bool iommu_group_has_isolated_msi(struct iommu_group *group) 1969 { 1970 struct group_device *group_dev; 1971 bool ret = true; 1972 1973 mutex_lock(&group->mutex); 1974 list_for_each_entry(group_dev, &group->devices, list) 1975 ret &= msi_device_has_isolated_msi(group_dev->dev); 1976 mutex_unlock(&group->mutex); 1977 return ret; 1978 } 1979 EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi); 1980 1981 /** 1982 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1983 * @domain: iommu domain 1984 * @handler: fault handler 1985 * @token: user data, will be passed back to the fault handler 1986 * 1987 * This function should be used by IOMMU users which want to be notified 1988 * whenever an IOMMU fault happens. 1989 * 1990 * The fault handler itself should return 0 on success, and an appropriate 1991 * error code otherwise. 
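 *
 * An illustrative sketch (hypothetical handler; assumes the
 * iommu_fault_handler_t prototype from include/linux/iommu.h):
 *
 *	static int my_domain_fault(struct iommu_domain *domain,
 *				   struct device *dev, unsigned long iova,
 *				   int flags, void *token)
 *	{
 *		dev_err(dev, "unexpected fault at IOVA %#lx\n", iova);
 *		return 0;	// fault handled/reported
 *	}
 *
 *	iommu_set_fault_handler(domain, my_domain_fault, my_token);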
1992 */ 1993 void iommu_set_fault_handler(struct iommu_domain *domain, 1994 iommu_fault_handler_t handler, 1995 void *token) 1996 { 1997 BUG_ON(!domain); 1998 1999 domain->handler = handler; 2000 domain->handler_token = token; 2001 } 2002 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 2003 2004 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 2005 unsigned type) 2006 { 2007 struct iommu_domain *domain; 2008 2009 if (bus == NULL || bus->iommu_ops == NULL) 2010 return NULL; 2011 2012 domain = bus->iommu_ops->domain_alloc(type); 2013 if (!domain) 2014 return NULL; 2015 2016 domain->type = type; 2017 /* Assume all sizes by default; the driver may override this later */ 2018 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 2019 if (!domain->ops) 2020 domain->ops = bus->iommu_ops->default_domain_ops; 2021 2022 if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { 2023 iommu_domain_free(domain); 2024 domain = NULL; 2025 } 2026 return domain; 2027 } 2028 2029 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 2030 { 2031 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 2032 } 2033 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 2034 2035 void iommu_domain_free(struct iommu_domain *domain) 2036 { 2037 if (domain->type == IOMMU_DOMAIN_SVA) 2038 mmdrop(domain->mm); 2039 iommu_put_dma_cookie(domain); 2040 domain->ops->free(domain); 2041 } 2042 EXPORT_SYMBOL_GPL(iommu_domain_free); 2043 2044 /* 2045 * Put the group's domain back to the appropriate core-owned domain - either the 2046 * standard kernel-mode DMA configuration or an all-DMA-blocked domain. 2047 */ 2048 static void __iommu_group_set_core_domain(struct iommu_group *group) 2049 { 2050 struct iommu_domain *new_domain; 2051 int ret; 2052 2053 if (group->owner) 2054 new_domain = group->blocking_domain; 2055 else 2056 new_domain = group->default_domain; 2057 2058 ret = __iommu_group_set_domain(group, new_domain); 2059 WARN(ret, "iommu driver failed to attach the default/blocking domain"); 2060 } 2061 2062 static int __iommu_attach_device(struct iommu_domain *domain, 2063 struct device *dev) 2064 { 2065 int ret; 2066 2067 if (unlikely(domain->ops->attach_dev == NULL)) 2068 return -ENODEV; 2069 2070 ret = domain->ops->attach_dev(domain, dev); 2071 if (ret) 2072 return ret; 2073 dev->iommu->attach_deferred = 0; 2074 trace_attach_device_to_domain(dev); 2075 return 0; 2076 } 2077 2078 /** 2079 * iommu_attach_device - Attach an IOMMU domain to a device 2080 * @domain: IOMMU domain to attach 2081 * @dev: Device that will be attached 2082 * 2083 * Returns 0 on success and error code on failure 2084 * 2085 * Note that EINVAL can be treated as a soft failure, indicating 2086 * that certain configuration of the domain is incompatible with 2087 * the device. In this case attaching a different domain to the 2088 * device may succeed. 
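 *
 * A minimal usage sketch (hypothetical caller, error handling trimmed),
 * using only functions defined in this file:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto out_free;
 *
 *	// ... use the domain, e.g. via iommu_map()/iommu_unmap() ...
 *
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);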
2089 */ 2090 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2091 { 2092 struct iommu_group *group; 2093 int ret; 2094 2095 group = iommu_group_get(dev); 2096 if (!group) 2097 return -ENODEV; 2098 2099 /* 2100 * Lock the group to make sure the device-count doesn't 2101 * change while we are attaching 2102 */ 2103 mutex_lock(&group->mutex); 2104 ret = -EINVAL; 2105 if (iommu_group_device_count(group) != 1) 2106 goto out_unlock; 2107 2108 ret = __iommu_attach_group(domain, group); 2109 2110 out_unlock: 2111 mutex_unlock(&group->mutex); 2112 iommu_group_put(group); 2113 2114 return ret; 2115 } 2116 EXPORT_SYMBOL_GPL(iommu_attach_device); 2117 2118 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2119 { 2120 if (dev->iommu && dev->iommu->attach_deferred) 2121 return __iommu_attach_device(domain, dev); 2122 2123 return 0; 2124 } 2125 2126 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2127 { 2128 struct iommu_group *group; 2129 2130 group = iommu_group_get(dev); 2131 if (!group) 2132 return; 2133 2134 mutex_lock(&group->mutex); 2135 if (WARN_ON(domain != group->domain) || 2136 WARN_ON(iommu_group_device_count(group) != 1)) 2137 goto out_unlock; 2138 __iommu_group_set_core_domain(group); 2139 2140 out_unlock: 2141 mutex_unlock(&group->mutex); 2142 iommu_group_put(group); 2143 } 2144 EXPORT_SYMBOL_GPL(iommu_detach_device); 2145 2146 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2147 { 2148 struct iommu_domain *domain; 2149 struct iommu_group *group; 2150 2151 group = iommu_group_get(dev); 2152 if (!group) 2153 return NULL; 2154 2155 domain = group->domain; 2156 2157 iommu_group_put(group); 2158 2159 return domain; 2160 } 2161 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2162 2163 /* 2164 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2165 * guarantees that the group and its default domain are valid and correct. 2166 */ 2167 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2168 { 2169 return dev->iommu_group->default_domain; 2170 } 2171 2172 /* 2173 * IOMMU groups are really the natural working unit of the IOMMU, but 2174 * the IOMMU API works on domains and devices. Bridge that gap by 2175 * iterating over the devices in a group. Ideally we'd have a single 2176 * device which represents the requestor ID of the group, but we also 2177 * allow IOMMU drivers to create policy defined minimum sets, where 2178 * the physical hardware may be able to distiguish members, but we 2179 * wish to group them at a higher level (ex. untrusted multi-function 2180 * PCI devices). Thus we attach each device. 2181 */ 2182 static int iommu_group_do_attach_device(struct device *dev, void *data) 2183 { 2184 struct iommu_domain *domain = data; 2185 2186 return __iommu_attach_device(domain, dev); 2187 } 2188 2189 static int __iommu_attach_group(struct iommu_domain *domain, 2190 struct iommu_group *group) 2191 { 2192 int ret; 2193 2194 if (group->domain && group->domain != group->default_domain && 2195 group->domain != group->blocking_domain) 2196 return -EBUSY; 2197 2198 ret = __iommu_group_for_each_dev(group, domain, 2199 iommu_group_do_attach_device); 2200 if (ret == 0) { 2201 group->domain = domain; 2202 } else { 2203 /* 2204 * To recover from the case when certain device within the 2205 * group fails to attach to the new domain, we need force 2206 * attaching all devices back to the old domain. 
The old 2207 * domain is compatible for all devices in the group, 2208 * hence the iommu driver should always return success. 2209 */ 2210 struct iommu_domain *old_domain = group->domain; 2211 2212 group->domain = NULL; 2213 WARN(__iommu_group_set_domain(group, old_domain), 2214 "iommu driver failed to attach a compatible domain"); 2215 } 2216 2217 return ret; 2218 } 2219 2220 /** 2221 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2222 * @domain: IOMMU domain to attach 2223 * @group: IOMMU group that will be attached 2224 * 2225 * Returns 0 on success and error code on failure 2226 * 2227 * Note that EINVAL can be treated as a soft failure, indicating 2228 * that certain configuration of the domain is incompatible with 2229 * the group. In this case attaching a different domain to the 2230 * group may succeed. 2231 */ 2232 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2233 { 2234 int ret; 2235 2236 mutex_lock(&group->mutex); 2237 ret = __iommu_attach_group(domain, group); 2238 mutex_unlock(&group->mutex); 2239 2240 return ret; 2241 } 2242 EXPORT_SYMBOL_GPL(iommu_attach_group); 2243 2244 static int iommu_group_do_set_platform_dma(struct device *dev, void *data) 2245 { 2246 const struct iommu_ops *ops = dev_iommu_ops(dev); 2247 2248 if (!WARN_ON(!ops->set_platform_dma_ops)) 2249 ops->set_platform_dma_ops(dev); 2250 2251 return 0; 2252 } 2253 2254 static int __iommu_group_set_domain(struct iommu_group *group, 2255 struct iommu_domain *new_domain) 2256 { 2257 int ret; 2258 2259 if (group->domain == new_domain) 2260 return 0; 2261 2262 /* 2263 * New drivers should support default domains, so set_platform_dma() 2264 * op will never be called. Otherwise the NULL domain represents some 2265 * platform specific behavior. 2266 */ 2267 if (!new_domain) { 2268 __iommu_group_for_each_dev(group, NULL, 2269 iommu_group_do_set_platform_dma); 2270 group->domain = NULL; 2271 return 0; 2272 } 2273 2274 /* 2275 * Changing the domain is done by calling attach_dev() on the new 2276 * domain. This switch does not have to be atomic and DMA can be 2277 * discarded during the transition. DMA must only be able to access 2278 * either new_domain or group->domain, never something else. 2279 * 2280 * Note that this is called in error unwind paths, attaching to a 2281 * domain that has already been attached cannot fail. 
2282 */ 2283 ret = __iommu_group_for_each_dev(group, new_domain, 2284 iommu_group_do_attach_device); 2285 if (ret) 2286 return ret; 2287 group->domain = new_domain; 2288 return 0; 2289 } 2290 2291 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2292 { 2293 mutex_lock(&group->mutex); 2294 __iommu_group_set_core_domain(group); 2295 mutex_unlock(&group->mutex); 2296 } 2297 EXPORT_SYMBOL_GPL(iommu_detach_group); 2298 2299 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2300 { 2301 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2302 return iova; 2303 2304 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2305 return 0; 2306 2307 return domain->ops->iova_to_phys(domain, iova); 2308 } 2309 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2310 2311 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2312 phys_addr_t paddr, size_t size, size_t *count) 2313 { 2314 unsigned int pgsize_idx, pgsize_idx_next; 2315 unsigned long pgsizes; 2316 size_t offset, pgsize, pgsize_next; 2317 unsigned long addr_merge = paddr | iova; 2318 2319 /* Page sizes supported by the hardware and small enough for @size */ 2320 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2321 2322 /* Constrain the page sizes further based on the maximum alignment */ 2323 if (likely(addr_merge)) 2324 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2325 2326 /* Make sure we have at least one suitable page size */ 2327 BUG_ON(!pgsizes); 2328 2329 /* Pick the biggest page size remaining */ 2330 pgsize_idx = __fls(pgsizes); 2331 pgsize = BIT(pgsize_idx); 2332 if (!count) 2333 return pgsize; 2334 2335 /* Find the next biggest support page size, if it exists */ 2336 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2337 if (!pgsizes) 2338 goto out_set_count; 2339 2340 pgsize_idx_next = __ffs(pgsizes); 2341 pgsize_next = BIT(pgsize_idx_next); 2342 2343 /* 2344 * There's no point trying a bigger page size unless the virtual 2345 * and physical addresses are similarly offset within the larger page. 2346 */ 2347 if ((iova ^ paddr) & (pgsize_next - 1)) 2348 goto out_set_count; 2349 2350 /* Calculate the offset to the next page size alignment boundary */ 2351 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2352 2353 /* 2354 * If size is big enough to accommodate the larger page, reduce 2355 * the number of smaller pages. 2356 */ 2357 if (offset + pgsize_next <= size) 2358 size = offset; 2359 2360 out_set_count: 2361 *count = size >> pgsize_idx; 2362 return pgsize; 2363 } 2364 2365 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2366 phys_addr_t paddr, size_t size, int prot, 2367 gfp_t gfp, size_t *mapped) 2368 { 2369 const struct iommu_domain_ops *ops = domain->ops; 2370 size_t pgsize, count; 2371 int ret; 2372 2373 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2374 2375 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2376 iova, &paddr, pgsize, count); 2377 2378 if (ops->map_pages) { 2379 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2380 gfp, mapped); 2381 } else { 2382 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2383 *mapped = ret ? 
0 : pgsize; 2384 } 2385 2386 return ret; 2387 } 2388 2389 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2390 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2391 { 2392 const struct iommu_domain_ops *ops = domain->ops; 2393 unsigned long orig_iova = iova; 2394 unsigned int min_pagesz; 2395 size_t orig_size = size; 2396 phys_addr_t orig_paddr = paddr; 2397 int ret = 0; 2398 2399 if (unlikely(!(ops->map || ops->map_pages) || 2400 domain->pgsize_bitmap == 0UL)) 2401 return -ENODEV; 2402 2403 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2404 return -EINVAL; 2405 2406 /* find out the minimum page size supported */ 2407 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2408 2409 /* 2410 * both the virtual address and the physical one, as well as 2411 * the size of the mapping, must be aligned (at least) to the 2412 * size of the smallest page supported by the hardware 2413 */ 2414 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2415 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2416 iova, &paddr, size, min_pagesz); 2417 return -EINVAL; 2418 } 2419 2420 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2421 2422 while (size) { 2423 size_t mapped = 0; 2424 2425 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2426 &mapped); 2427 /* 2428 * Some pages may have been mapped, even if an error occurred, 2429 * so we should account for those so they can be unmapped. 2430 */ 2431 size -= mapped; 2432 2433 if (ret) 2434 break; 2435 2436 iova += mapped; 2437 paddr += mapped; 2438 } 2439 2440 /* unroll mapping in case something went wrong */ 2441 if (ret) 2442 iommu_unmap(domain, orig_iova, orig_size - size); 2443 else 2444 trace_map(orig_iova, orig_paddr, orig_size); 2445 2446 return ret; 2447 } 2448 2449 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2450 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2451 { 2452 const struct iommu_domain_ops *ops = domain->ops; 2453 int ret; 2454 2455 might_sleep_if(gfpflags_allow_blocking(gfp)); 2456 2457 /* Discourage passing strange GFP flags */ 2458 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2459 __GFP_HIGHMEM))) 2460 return -EINVAL; 2461 2462 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2463 if (ret == 0 && ops->iotlb_sync_map) 2464 ops->iotlb_sync_map(domain, iova, size); 2465 2466 return ret; 2467 } 2468 EXPORT_SYMBOL_GPL(iommu_map); 2469 2470 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2471 unsigned long iova, size_t size, 2472 struct iommu_iotlb_gather *iotlb_gather) 2473 { 2474 const struct iommu_domain_ops *ops = domain->ops; 2475 size_t pgsize, count; 2476 2477 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2478 return ops->unmap_pages ? 
2479 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2480 ops->unmap(domain, iova, pgsize, iotlb_gather); 2481 } 2482 2483 static size_t __iommu_unmap(struct iommu_domain *domain, 2484 unsigned long iova, size_t size, 2485 struct iommu_iotlb_gather *iotlb_gather) 2486 { 2487 const struct iommu_domain_ops *ops = domain->ops; 2488 size_t unmapped_page, unmapped = 0; 2489 unsigned long orig_iova = iova; 2490 unsigned int min_pagesz; 2491 2492 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2493 domain->pgsize_bitmap == 0UL)) 2494 return 0; 2495 2496 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2497 return 0; 2498 2499 /* find out the minimum page size supported */ 2500 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2501 2502 /* 2503 * The virtual address, as well as the size of the mapping, must be 2504 * aligned (at least) to the size of the smallest page supported 2505 * by the hardware 2506 */ 2507 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2508 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2509 iova, size, min_pagesz); 2510 return 0; 2511 } 2512 2513 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2514 2515 /* 2516 * Keep iterating until we either unmap 'size' bytes (or more) 2517 * or we hit an area that isn't mapped. 2518 */ 2519 while (unmapped < size) { 2520 unmapped_page = __iommu_unmap_pages(domain, iova, 2521 size - unmapped, 2522 iotlb_gather); 2523 if (!unmapped_page) 2524 break; 2525 2526 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2527 iova, unmapped_page); 2528 2529 iova += unmapped_page; 2530 unmapped += unmapped_page; 2531 } 2532 2533 trace_unmap(orig_iova, size, unmapped); 2534 return unmapped; 2535 } 2536 2537 size_t iommu_unmap(struct iommu_domain *domain, 2538 unsigned long iova, size_t size) 2539 { 2540 struct iommu_iotlb_gather iotlb_gather; 2541 size_t ret; 2542 2543 iommu_iotlb_gather_init(&iotlb_gather); 2544 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2545 iommu_iotlb_sync(domain, &iotlb_gather); 2546 2547 return ret; 2548 } 2549 EXPORT_SYMBOL_GPL(iommu_unmap); 2550 2551 size_t iommu_unmap_fast(struct iommu_domain *domain, 2552 unsigned long iova, size_t size, 2553 struct iommu_iotlb_gather *iotlb_gather) 2554 { 2555 return __iommu_unmap(domain, iova, size, iotlb_gather); 2556 } 2557 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2558 2559 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2560 struct scatterlist *sg, unsigned int nents, int prot, 2561 gfp_t gfp) 2562 { 2563 const struct iommu_domain_ops *ops = domain->ops; 2564 size_t len = 0, mapped = 0; 2565 phys_addr_t start; 2566 unsigned int i = 0; 2567 int ret; 2568 2569 might_sleep_if(gfpflags_allow_blocking(gfp)); 2570 2571 /* Discourage passing strange GFP flags */ 2572 if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | 2573 __GFP_HIGHMEM))) 2574 return -EINVAL; 2575 2576 while (i <= nents) { 2577 phys_addr_t s_phys = sg_phys(sg); 2578 2579 if (len && s_phys != start + len) { 2580 ret = __iommu_map(domain, iova + mapped, start, 2581 len, prot, gfp); 2582 2583 if (ret) 2584 goto out_err; 2585 2586 mapped += len; 2587 len = 0; 2588 } 2589 2590 if (sg_is_dma_bus_address(sg)) 2591 goto next; 2592 2593 if (len) { 2594 len += sg->length; 2595 } else { 2596 len = sg->length; 2597 start = s_phys; 2598 } 2599 2600 next: 2601 if (++i < nents) 2602 sg = sg_next(sg); 2603 } 2604 2605 if (ops->iotlb_sync_map) 2606 ops->iotlb_sync_map(domain, iova, mapped); 2607 return mapped; 2608 2609 out_err: 2610 /* undo mappings already done 
*/ 2611 iommu_unmap(domain, iova, mapped); 2612 2613 return ret; 2614 } 2615 EXPORT_SYMBOL_GPL(iommu_map_sg); 2616 2617 /** 2618 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2619 * @domain: the iommu domain where the fault has happened 2620 * @dev: the device where the fault has happened 2621 * @iova: the faulting address 2622 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2623 * 2624 * This function should be called by the low-level IOMMU implementations 2625 * whenever IOMMU faults happen, to allow high-level users, that are 2626 * interested in such events, to know about them. 2627 * 2628 * This event may be useful for several possible use cases: 2629 * - mere logging of the event 2630 * - dynamic TLB/PTE loading 2631 * - if restarting of the faulting device is required 2632 * 2633 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2634 * PTE/TLB loading will one day be supported, implementations will be able 2635 * to tell whether it succeeded or not according to this return value). 2636 * 2637 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2638 * (though fault handlers can also return -ENOSYS, in case they want to 2639 * elicit the default behavior of the IOMMU drivers). 2640 */ 2641 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2642 unsigned long iova, int flags) 2643 { 2644 int ret = -ENOSYS; 2645 2646 /* 2647 * if upper layers showed interest and installed a fault handler, 2648 * invoke it. 2649 */ 2650 if (domain->handler) 2651 ret = domain->handler(domain, dev, iova, flags, 2652 domain->handler_token); 2653 2654 trace_io_page_fault(dev, iova, flags); 2655 return ret; 2656 } 2657 EXPORT_SYMBOL_GPL(report_iommu_fault); 2658 2659 static int __init iommu_init(void) 2660 { 2661 iommu_group_kset = kset_create_and_add("iommu_groups", 2662 NULL, kernel_kobj); 2663 BUG_ON(!iommu_group_kset); 2664 2665 iommu_debugfs_setup(); 2666 2667 return 0; 2668 } 2669 core_initcall(iommu_init); 2670 2671 int iommu_enable_nesting(struct iommu_domain *domain) 2672 { 2673 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2674 return -EINVAL; 2675 if (!domain->ops->enable_nesting) 2676 return -EINVAL; 2677 return domain->ops->enable_nesting(domain); 2678 } 2679 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2680 2681 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2682 unsigned long quirk) 2683 { 2684 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2685 return -EINVAL; 2686 if (!domain->ops->set_pgtable_quirks) 2687 return -EINVAL; 2688 return domain->ops->set_pgtable_quirks(domain, quirk); 2689 } 2690 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2691 2692 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2693 { 2694 const struct iommu_ops *ops = dev_iommu_ops(dev); 2695 2696 if (ops->get_resv_regions) 2697 ops->get_resv_regions(dev, list); 2698 } 2699 2700 /** 2701 * iommu_put_resv_regions - release resered regions 2702 * @dev: device for which to free reserved regions 2703 * @list: reserved region list for device 2704 * 2705 * This releases a reserved region list acquired by iommu_get_resv_regions(). 
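 *
 * The expected pairing, as a sketch (assumes @dev sits behind an IOMMU and
 * that the caller only inspects the regions):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		... look at region->start, region->length, region->type ...
 *	iommu_put_resv_regions(dev, &resv_regions);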
2706 */ 2707 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2708 { 2709 struct iommu_resv_region *entry, *next; 2710 2711 list_for_each_entry_safe(entry, next, list, list) { 2712 if (entry->free) 2713 entry->free(dev, entry); 2714 else 2715 kfree(entry); 2716 } 2717 } 2718 EXPORT_SYMBOL(iommu_put_resv_regions); 2719 2720 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2721 size_t length, int prot, 2722 enum iommu_resv_type type, 2723 gfp_t gfp) 2724 { 2725 struct iommu_resv_region *region; 2726 2727 region = kzalloc(sizeof(*region), gfp); 2728 if (!region) 2729 return NULL; 2730 2731 INIT_LIST_HEAD(&region->list); 2732 region->start = start; 2733 region->length = length; 2734 region->prot = prot; 2735 region->type = type; 2736 return region; 2737 } 2738 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2739 2740 void iommu_set_default_passthrough(bool cmd_line) 2741 { 2742 if (cmd_line) 2743 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2744 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2745 } 2746 2747 void iommu_set_default_translated(bool cmd_line) 2748 { 2749 if (cmd_line) 2750 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2751 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2752 } 2753 2754 bool iommu_default_passthrough(void) 2755 { 2756 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2757 } 2758 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2759 2760 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2761 { 2762 const struct iommu_ops *ops = NULL; 2763 struct iommu_device *iommu; 2764 2765 spin_lock(&iommu_device_lock); 2766 list_for_each_entry(iommu, &iommu_device_list, list) 2767 if (iommu->fwnode == fwnode) { 2768 ops = iommu->ops; 2769 break; 2770 } 2771 spin_unlock(&iommu_device_lock); 2772 return ops; 2773 } 2774 2775 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2776 const struct iommu_ops *ops) 2777 { 2778 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2779 2780 if (fwspec) 2781 return ops == fwspec->ops ? 0 : -EINVAL; 2782 2783 if (!dev_iommu_get(dev)) 2784 return -ENOMEM; 2785 2786 /* Preallocate for the overwhelmingly common case of 1 ID */ 2787 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2788 if (!fwspec) 2789 return -ENOMEM; 2790 2791 of_node_get(to_of_node(iommu_fwnode)); 2792 fwspec->iommu_fwnode = iommu_fwnode; 2793 fwspec->ops = ops; 2794 dev_iommu_fwspec_set(dev, fwspec); 2795 return 0; 2796 } 2797 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2798 2799 void iommu_fwspec_free(struct device *dev) 2800 { 2801 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2802 2803 if (fwspec) { 2804 fwnode_handle_put(fwspec->iommu_fwnode); 2805 kfree(fwspec); 2806 dev_iommu_fwspec_set(dev, NULL); 2807 } 2808 } 2809 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2810 2811 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2812 { 2813 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2814 int i, new_num; 2815 2816 if (!fwspec) 2817 return -EINVAL; 2818 2819 new_num = fwspec->num_ids + num_ids; 2820 if (new_num > 1) { 2821 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2822 GFP_KERNEL); 2823 if (!fwspec) 2824 return -ENOMEM; 2825 2826 dev_iommu_fwspec_set(dev, fwspec); 2827 } 2828 2829 for (i = 0; i < num_ids; i++) 2830 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2831 2832 fwspec->num_ids = new_num; 2833 return 0; 2834 } 2835 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2836 2837 /* 2838 * Per device IOMMU features.
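 *
 * A hedged usage sketch (illustrative only; assumes the driver has verified
 * that its device and IOMMU support Shared Virtual Addressing):
 *
 *	if (!iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		... bind address spaces / issue PASID-tagged DMA ...
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	}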
2839 */ 2840 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2841 { 2842 if (dev->iommu && dev->iommu->iommu_dev) { 2843 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2844 2845 if (ops->dev_enable_feat) 2846 return ops->dev_enable_feat(dev, feat); 2847 } 2848 2849 return -ENODEV; 2850 } 2851 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2852 2853 /* 2854 * The device drivers should do the necessary cleanups before calling this. 2855 */ 2856 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2857 { 2858 if (dev->iommu && dev->iommu->iommu_dev) { 2859 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2860 2861 if (ops->dev_disable_feat) 2862 return ops->dev_disable_feat(dev, feat); 2863 } 2864 2865 return -EBUSY; 2866 } 2867 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2868 2869 /* 2870 * Changes the default domain of an iommu group that has *only* one device 2871 * 2872 * @group: The group for which the default domain should be changed 2873 * @prev_dev: The device in the group (this is used to make sure that the device 2874 * hasn't changed after the caller has called this function) 2875 * @type: The type of the new default domain that gets associated with the group 2876 * 2877 * Returns 0 on success and error code on failure 2878 * 2879 * Note: 2880 * 1. Presently, this function is called only when user requests to change the 2881 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 2882 * Please take a closer look if intended to use for other purposes. 2883 */ 2884 static int iommu_change_dev_def_domain(struct iommu_group *group, 2885 struct device *prev_dev, int type) 2886 { 2887 struct iommu_domain *prev_dom; 2888 struct group_device *grp_dev; 2889 int ret, dev_def_dom; 2890 struct device *dev; 2891 2892 lockdep_assert_held(&group->mutex); 2893 2894 if (group->default_domain != group->domain) { 2895 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 2896 ret = -EBUSY; 2897 goto out; 2898 } 2899 2900 /* 2901 * iommu group wasn't locked while acquiring device lock in 2902 * iommu_group_store_type(). So, make sure that the device count hasn't 2903 * changed while acquiring device lock. 2904 * 2905 * Changing default domain of an iommu group with two or more devices 2906 * isn't supported because there could be a potential deadlock. Consider 2907 * the following scenario. T1 is trying to acquire device locks of all 2908 * the devices in the group and before it could acquire all of them, 2909 * there could be another thread T2 (from different sub-system and use 2910 * case) that has already acquired some of the device locks and might be 2911 * waiting for T1 to release other device locks. 
2912 */ 2913 if (iommu_group_device_count(group) != 1) { 2914 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); 2915 ret = -EINVAL; 2916 goto out; 2917 } 2918 2919 /* Since group has only one device */ 2920 grp_dev = list_first_entry(&group->devices, struct group_device, list); 2921 dev = grp_dev->dev; 2922 2923 if (prev_dev != dev) { 2924 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); 2925 ret = -EBUSY; 2926 goto out; 2927 } 2928 2929 prev_dom = group->default_domain; 2930 if (!prev_dom) { 2931 ret = -EINVAL; 2932 goto out; 2933 } 2934 2935 dev_def_dom = iommu_get_def_domain_type(dev); 2936 if (!type) { 2937 /* 2938 * If the user hasn't requested any specific type of domain and 2939 * if the device supports both the domains, then default to the 2940 * domain the device was booted with 2941 */ 2942 type = dev_def_dom ? : iommu_def_domain_type; 2943 } else if (dev_def_dom && type != dev_def_dom) { 2944 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", 2945 iommu_domain_type_str(type)); 2946 ret = -EINVAL; 2947 goto out; 2948 } 2949 2950 /* 2951 * Switch to a new domain only if the requested domain type is different 2952 * from the existing default domain type 2953 */ 2954 if (prev_dom->type == type) { 2955 ret = 0; 2956 goto out; 2957 } 2958 2959 /* We can bring up a flush queue without tearing down the domain */ 2960 if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) { 2961 ret = iommu_dma_init_fq(prev_dom); 2962 if (!ret) 2963 prev_dom->type = IOMMU_DOMAIN_DMA_FQ; 2964 goto out; 2965 } 2966 2967 /* Sets group->default_domain to the newly allocated domain */ 2968 ret = iommu_group_alloc_default_domain(dev->bus, group, type); 2969 if (ret) 2970 goto out; 2971 2972 ret = iommu_create_device_direct_mappings(group, dev); 2973 if (ret) 2974 goto free_new_domain; 2975 2976 ret = __iommu_attach_device(group->default_domain, dev); 2977 if (ret) 2978 goto free_new_domain; 2979 2980 group->domain = group->default_domain; 2981 iommu_domain_free(prev_dom); 2982 2983 return 0; 2984 2985 free_new_domain: 2986 iommu_domain_free(group->default_domain); 2987 group->default_domain = prev_dom; 2988 group->domain = prev_dom; 2989 out: 2990 return ret; 2991 } 2992 2993 /* 2994 * Changing the default domain through sysfs requires the users to unbind the 2995 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 2996 * transition. Return failure if this isn't met. 2997 * 2998 * We need to consider the race between this and the device release path. 2999 * device_lock(dev) is used here to guarantee that the device release path 3000 * will not be entered at the same time. 3001 */ 3002 static ssize_t iommu_group_store_type(struct iommu_group *group, 3003 const char *buf, size_t count) 3004 { 3005 struct group_device *grp_dev; 3006 struct device *dev; 3007 int ret, req_type; 3008 3009 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3010 return -EACCES; 3011 3012 if (WARN_ON(!group) || !group->default_domain) 3013 return -EINVAL; 3014 3015 if (sysfs_streq(buf, "identity")) 3016 req_type = IOMMU_DOMAIN_IDENTITY; 3017 else if (sysfs_streq(buf, "DMA")) 3018 req_type = IOMMU_DOMAIN_DMA; 3019 else if (sysfs_streq(buf, "DMA-FQ")) 3020 req_type = IOMMU_DOMAIN_DMA_FQ; 3021 else if (sysfs_streq(buf, "auto")) 3022 req_type = 0; 3023 else 3024 return -EINVAL; 3025 3026 /* 3027 * Lock/Unlock the group mutex here before device lock to 3028 * 1. 
Make sure that the iommu group has only one device (this is a 3029 * prerequisite for step 2) 3030 * 2. Get struct *dev which is needed to lock device 3031 */ 3032 mutex_lock(&group->mutex); 3033 if (iommu_group_device_count(group) != 1) { 3034 mutex_unlock(&group->mutex); 3035 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 3036 return -EINVAL; 3037 } 3038 3039 /* Since group has only one device */ 3040 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3041 dev = grp_dev->dev; 3042 get_device(dev); 3043 3044 /* 3045 * Don't hold the group mutex because taking group mutex first and then 3046 * the device lock could potentially cause a deadlock as below. Assume 3047 * two threads T1 and T2. T1 is trying to change default domain of an 3048 * iommu group and T2 is trying to hot unplug a device or release [1] VF 3049 * of a PCIe device which is in the same iommu group. T1 takes group 3050 * mutex and before it could take device lock assume T2 has taken device 3051 * lock and is yet to take group mutex. Now, both the threads will be 3052 * waiting for the other thread to release lock. Below, lock order was 3053 * suggested. 3054 * device_lock(dev); 3055 * mutex_lock(&group->mutex); 3056 * iommu_change_dev_def_domain(); 3057 * mutex_unlock(&group->mutex); 3058 * device_unlock(dev); 3059 * 3060 * [1] Typical device release path 3061 * device_lock() from device/driver core code 3062 * -> bus_notifier() 3063 * -> iommu_bus_notifier() 3064 * -> iommu_release_device() 3065 * -> ops->release_device() vendor driver calls back iommu core code 3066 * -> mutex_lock() from iommu core code 3067 */ 3068 mutex_unlock(&group->mutex); 3069 3070 /* Check if the device in the group still has a driver bound to it */ 3071 device_lock(dev); 3072 if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ && 3073 group->default_domain->type == IOMMU_DOMAIN_DMA)) { 3074 pr_err_ratelimited("Device is still bound to driver\n"); 3075 ret = -EBUSY; 3076 goto out; 3077 } 3078 3079 mutex_lock(&group->mutex); 3080 ret = iommu_change_dev_def_domain(group, dev, req_type); 3081 /* 3082 * Release the mutex here because ops->probe_finalize() call-back of 3083 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 3084 * in-turn might call back into IOMMU core code, where it tries to take 3085 * group->mutex, resulting in a deadlock. 3086 */ 3087 mutex_unlock(&group->mutex); 3088 3089 /* Make sure dma_ops is appropriatley set */ 3090 if (!ret) 3091 iommu_group_do_probe_finalize(dev, group->default_domain); 3092 ret = ret ?: count; 3093 3094 out: 3095 device_unlock(dev); 3096 put_device(dev); 3097 3098 return ret; 3099 } 3100 3101 static bool iommu_is_default_domain(struct iommu_group *group) 3102 { 3103 if (group->domain == group->default_domain) 3104 return true; 3105 3106 /* 3107 * If the default domain was set to identity and it is still an identity 3108 * domain then we consider this a pass. This happens because of 3109 * amd_iommu_init_device() replacing the default idenytity domain with an 3110 * identity domain that has a different configuration for AMDGPU. 3111 */ 3112 if (group->default_domain && 3113 group->default_domain->type == IOMMU_DOMAIN_IDENTITY && 3114 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) 3115 return true; 3116 return false; 3117 } 3118 3119 /** 3120 * iommu_device_use_default_domain() - Device driver wants to handle device 3121 * DMA through the kernel DMA API. 3122 * @dev: The device. 
3123 * 3124 * The device driver about to bind @dev wants to do DMA through the kernel 3125 * DMA API. Return 0 if it is allowed, otherwise an error. 3126 */ 3127 int iommu_device_use_default_domain(struct device *dev) 3128 { 3129 struct iommu_group *group = iommu_group_get(dev); 3130 int ret = 0; 3131 3132 if (!group) 3133 return 0; 3134 3135 mutex_lock(&group->mutex); 3136 if (group->owner_cnt) { 3137 if (group->owner || !iommu_is_default_domain(group) || 3138 !xa_empty(&group->pasid_array)) { 3139 ret = -EBUSY; 3140 goto unlock_out; 3141 } 3142 } 3143 3144 group->owner_cnt++; 3145 3146 unlock_out: 3147 mutex_unlock(&group->mutex); 3148 iommu_group_put(group); 3149 3150 return ret; 3151 } 3152 3153 /** 3154 * iommu_device_unuse_default_domain() - Device driver stops handling device 3155 * DMA through the kernel DMA API. 3156 * @dev: The device. 3157 * 3158 * The device driver doesn't want to do DMA through kernel DMA API anymore. 3159 * It must be called after iommu_device_use_default_domain(). 3160 */ 3161 void iommu_device_unuse_default_domain(struct device *dev) 3162 { 3163 struct iommu_group *group = iommu_group_get(dev); 3164 3165 if (!group) 3166 return; 3167 3168 mutex_lock(&group->mutex); 3169 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3170 group->owner_cnt--; 3171 3172 mutex_unlock(&group->mutex); 3173 iommu_group_put(group); 3174 } 3175 3176 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3177 { 3178 struct group_device *dev = 3179 list_first_entry(&group->devices, struct group_device, list); 3180 3181 if (group->blocking_domain) 3182 return 0; 3183 3184 group->blocking_domain = 3185 __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED); 3186 if (!group->blocking_domain) { 3187 /* 3188 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED 3189 * create an empty domain instead. 3190 */ 3191 group->blocking_domain = __iommu_domain_alloc( 3192 dev->dev->bus, IOMMU_DOMAIN_UNMANAGED); 3193 if (!group->blocking_domain) 3194 return -EINVAL; 3195 } 3196 return 0; 3197 } 3198 3199 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3200 { 3201 int ret; 3202 3203 if ((group->domain && group->domain != group->default_domain) || 3204 !xa_empty(&group->pasid_array)) 3205 return -EBUSY; 3206 3207 ret = __iommu_group_alloc_blocking_domain(group); 3208 if (ret) 3209 return ret; 3210 ret = __iommu_group_set_domain(group, group->blocking_domain); 3211 if (ret) 3212 return ret; 3213 3214 group->owner = owner; 3215 group->owner_cnt++; 3216 return 0; 3217 } 3218 3219 /** 3220 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3221 * @group: The group. 3222 * @owner: Caller specified pointer. Used for exclusive ownership. 3223 * 3224 * This is to support backward compatibility for vfio which manages the dma 3225 * ownership in iommu_group level. New invocations on this interface should be 3226 * prohibited. Only a single owner may exist for a group. 
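 *
 * A minimal usage sketch (illustrative only; the owner cookie below is any
 * unique pointer chosen by the caller, e.g. a VFIO file):
 *
 *	ret = iommu_group_claim_dma_owner(group, my_owner_cookie);
 *	if (ret)
 *		return ret;
 *	... attach a caller-owned UNMANAGED domain, run user DMA ...
 *	iommu_group_release_dma_owner(group);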
3227 */ 3228 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3229 { 3230 int ret = 0; 3231 3232 if (WARN_ON(!owner)) 3233 return -EINVAL; 3234 3235 mutex_lock(&group->mutex); 3236 if (group->owner_cnt) { 3237 ret = -EPERM; 3238 goto unlock_out; 3239 } 3240 3241 ret = __iommu_take_dma_ownership(group, owner); 3242 unlock_out: 3243 mutex_unlock(&group->mutex); 3244 3245 return ret; 3246 } 3247 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3248 3249 /** 3250 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3251 * @dev: The device. 3252 * @owner: Caller specified pointer. Used for exclusive ownership. 3253 * 3254 * Claim the DMA ownership of a device. Multiple devices in the same group may 3255 * concurrently claim ownership if they present the same owner value. Returns 0 3256 * on success and error code on failure 3257 */ 3258 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3259 { 3260 struct iommu_group *group; 3261 int ret = 0; 3262 3263 if (WARN_ON(!owner)) 3264 return -EINVAL; 3265 3266 group = iommu_group_get(dev); 3267 if (!group) 3268 return -ENODEV; 3269 3270 mutex_lock(&group->mutex); 3271 if (group->owner_cnt) { 3272 if (group->owner != owner) { 3273 ret = -EPERM; 3274 goto unlock_out; 3275 } 3276 group->owner_cnt++; 3277 goto unlock_out; 3278 } 3279 3280 ret = __iommu_take_dma_ownership(group, owner); 3281 unlock_out: 3282 mutex_unlock(&group->mutex); 3283 iommu_group_put(group); 3284 3285 return ret; 3286 } 3287 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3288 3289 static void __iommu_release_dma_ownership(struct iommu_group *group) 3290 { 3291 int ret; 3292 3293 if (WARN_ON(!group->owner_cnt || !group->owner || 3294 !xa_empty(&group->pasid_array))) 3295 return; 3296 3297 group->owner_cnt = 0; 3298 group->owner = NULL; 3299 ret = __iommu_group_set_domain(group, group->default_domain); 3300 WARN(ret, "iommu driver failed to attach the default domain"); 3301 } 3302 3303 /** 3304 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3305 * @dev: The device 3306 * 3307 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3308 */ 3309 void iommu_group_release_dma_owner(struct iommu_group *group) 3310 { 3311 mutex_lock(&group->mutex); 3312 __iommu_release_dma_ownership(group); 3313 mutex_unlock(&group->mutex); 3314 } 3315 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3316 3317 /** 3318 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3319 * @group: The device. 3320 * 3321 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3322 */ 3323 void iommu_device_release_dma_owner(struct device *dev) 3324 { 3325 struct iommu_group *group = iommu_group_get(dev); 3326 3327 mutex_lock(&group->mutex); 3328 if (group->owner_cnt > 1) 3329 group->owner_cnt--; 3330 else 3331 __iommu_release_dma_ownership(group); 3332 mutex_unlock(&group->mutex); 3333 iommu_group_put(group); 3334 } 3335 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3336 3337 /** 3338 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3339 * @group: The group. 3340 * 3341 * This provides status query on a given group. It is racy and only for 3342 * non-binding status reporting. 
3343 */ 3344 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3345 { 3346 unsigned int user; 3347 3348 mutex_lock(&group->mutex); 3349 user = group->owner_cnt; 3350 mutex_unlock(&group->mutex); 3351 3352 return user; 3353 } 3354 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3355 3356 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3357 struct iommu_group *group, ioasid_t pasid) 3358 { 3359 struct group_device *device; 3360 int ret = 0; 3361 3362 list_for_each_entry(device, &group->devices, list) { 3363 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3364 if (ret) 3365 break; 3366 } 3367 3368 return ret; 3369 } 3370 3371 static void __iommu_remove_group_pasid(struct iommu_group *group, 3372 ioasid_t pasid) 3373 { 3374 struct group_device *device; 3375 const struct iommu_ops *ops; 3376 3377 list_for_each_entry(device, &group->devices, list) { 3378 ops = dev_iommu_ops(device->dev); 3379 ops->remove_dev_pasid(device->dev, pasid); 3380 } 3381 } 3382 3383 /* 3384 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3385 * @domain: the iommu domain. 3386 * @dev: the attached device. 3387 * @pasid: the pasid of the device. 3388 * 3389 * Return: 0 on success, or an error. 3390 */ 3391 int iommu_attach_device_pasid(struct iommu_domain *domain, 3392 struct device *dev, ioasid_t pasid) 3393 { 3394 struct iommu_group *group; 3395 void *curr; 3396 int ret; 3397 3398 if (!domain->ops->set_dev_pasid) 3399 return -EOPNOTSUPP; 3400 3401 group = iommu_group_get(dev); 3402 if (!group) 3403 return -ENODEV; 3404 3405 mutex_lock(&group->mutex); 3406 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3407 if (curr) { 3408 ret = xa_err(curr) ? : -EBUSY; 3409 goto out_unlock; 3410 } 3411 3412 ret = __iommu_set_group_pasid(domain, group, pasid); 3413 if (ret) { 3414 __iommu_remove_group_pasid(group, pasid); 3415 xa_erase(&group->pasid_array, pasid); 3416 } 3417 out_unlock: 3418 mutex_unlock(&group->mutex); 3419 iommu_group_put(group); 3420 3421 return ret; 3422 } 3423 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3424 3425 /* 3426 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3427 * @domain: the iommu domain. 3428 * @dev: the attached device. 3429 * @pasid: the pasid of the device. 3430 * 3431 * The @domain must have been attached to @pasid of the @dev with 3432 * iommu_attach_device_pasid(). 3433 */ 3434 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3435 ioasid_t pasid) 3436 { 3437 struct iommu_group *group = iommu_group_get(dev); 3438 3439 mutex_lock(&group->mutex); 3440 __iommu_remove_group_pasid(group, pasid); 3441 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3442 mutex_unlock(&group->mutex); 3443 3444 iommu_group_put(group); 3445 } 3446 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3447 3448 /* 3449 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3450 * @dev: the queried device 3451 * @pasid: the pasid of the device 3452 * @type: matched domain type, 0 for any match 3453 * 3454 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3455 * domain attached to pasid of a device. Callers must hold a lock around this 3456 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3457 * type is being manipulated. This API does not internally resolve races with 3458 * attach/detach. 3459 * 3460 * Return: attached domain on success, NULL otherwise. 
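 *
 * A usage sketch under the caller's own PASID lock (illustrative only; the
 * pasid value is assumed to have been allocated by the caller):
 *
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	...
 *	domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
 *	if (!IS_ERR_OR_NULL(domain))
 *		... use the attached domain ...
 *	...
 *	iommu_detach_device_pasid(domain, dev, pasid);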
3461 */ 3462 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3463 ioasid_t pasid, 3464 unsigned int type) 3465 { 3466 struct iommu_domain *domain; 3467 struct iommu_group *group; 3468 3469 group = iommu_group_get(dev); 3470 if (!group) 3471 return NULL; 3472 3473 xa_lock(&group->pasid_array); 3474 domain = xa_load(&group->pasid_array, pasid); 3475 if (type && domain && domain->type != type) 3476 domain = ERR_PTR(-EBUSY); 3477 xa_unlock(&group->pasid_array); 3478 iommu_group_put(group); 3479 3480 return domain; 3481 } 3482 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3483 3484 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3485 struct mm_struct *mm) 3486 { 3487 const struct iommu_ops *ops = dev_iommu_ops(dev); 3488 struct iommu_domain *domain; 3489 3490 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3491 if (!domain) 3492 return NULL; 3493 3494 domain->type = IOMMU_DOMAIN_SVA; 3495 mmgrab(mm); 3496 domain->mm = mm; 3497 domain->iopf_handler = iommu_sva_handle_iopf; 3498 domain->fault_data = mm; 3499 3500 return domain; 3501 } 3502
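
/*
 * Illustrative sketch only, not used by the kernel: the canonical
 * unmanaged-domain flow a caller might follow, tying together
 * iommu_domain_alloc(), iommu_attach_device(), iommu_map() and
 * iommu_unmap() from this file. The function name and the fixed IOVA/size
 * are hypothetical; real callers must pick an IOVA and size compatible
 * with the domain's pgsize_bitmap.
 */
static int __maybe_unused iommu_example_map_one_page(struct device *dev,
						     phys_addr_t paddr)
{
	struct iommu_domain *domain;
	unsigned long iova = 0;
	int ret;

	/* Allocate a caller-owned (UNMANAGED) domain for dev's bus */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	/* Replace the group's current domain with the new one */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Map one page read/write at IOVA 0, then tear it down again */
	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (!ret)
		iommu_unmap(domain, iova, PAGE_SIZE);

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}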