// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct xarray pasid_array;
        struct mutex mutex;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *blocking_domain;
        struct iommu_domain *domain;
        struct list_head entry;
        unsigned int owner_cnt;
        void *owner;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]           = "direct",
        [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
        [IOMMU_RESV_RESERVED]         = "reserved",
        [IOMMU_RESV_MSI]              = "msi",
        [IOMMU_RESV_SW_MSI]           = "msi",
};

#define IOMMU_CMD_LINE_DMA_API          BIT(0)
#define IOMMU_CMD_LINE_STRICT           BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
                                      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static int __iommu_group_set_domain(struct iommu_group *group,
                                    struct iommu_domain *new_domain);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
                                               struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
                                      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
        &platform_bus_type,
#ifdef CONFIG_PCI
        &pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
        &amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
        &fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
        &host1x_context_device_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
        switch (t) {
        case IOMMU_DOMAIN_BLOCKED:
                return "Blocked";
        case IOMMU_DOMAIN_IDENTITY:
                return "Passthrough";
        case IOMMU_DOMAIN_UNMANAGED:
                return "Unmanaged";
        case IOMMU_DOMAIN_DMA:
        case IOMMU_DOMAIN_DMA_FQ:
                return "Translated";
        default:
                return "Unknown";
        }
}

static int __init iommu_subsys_init(void)
{
        struct notifier_block *nb;

        if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
                if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
                        iommu_set_default_passthrough(false);
                else
                        iommu_set_default_translated(false);

                if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
                        pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
                        iommu_set_default_translated(false);
                }
        }

        if (!iommu_default_passthrough() && !iommu_dma_strict)
                iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

        pr_info("Default domain type: %s %s\n",
                iommu_domain_type_str(iommu_def_domain_type),
                (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
                        "(set via kernel command line)" : "");

        if (!iommu_default_passthrough())
                pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
                        iommu_dma_strict ? "strict" : "lazy",
                        (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
                                "(set via kernel command line)" : "");

        nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
                nb[i].notifier_call = iommu_bus_notifier;
                bus_register_notifier(iommu_buses[i], &nb[i]);
        }

        return 0;
}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{
        if (dev->iommu && dev->iommu->iommu_dev == data)
                iommu_release_device(dev);

        return 0;
}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops: IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
                          const struct iommu_ops *ops, struct device *hwdev)
{
        int err = 0;

        /* We need to be able to take module references appropriately */
        if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
                return -EINVAL;
        /*
         * Temporarily enforce global restriction to a single driver. This was
         * already the de-facto behaviour, since any possible combination of
         * existing drivers would compete for at least the PCI or platform bus.
         */
        if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
                return -EBUSY;

        iommu->ops = ops;
        if (hwdev)
                iommu->fwnode = dev_fwnode(hwdev);

        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);

        for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
                iommu_buses[i]->iommu_ops = ops;
                err = bus_iommu_probe(iommu_buses[i]);
        }
        if (err)
                iommu_device_unregister(iommu);
        return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
        for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
                bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
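/*
 * Illustrative usage sketch, not part of this file: a hypothetical "foo"
 * platform driver registering its hardware instance with the core. All
 * foo_* names are assumptions; only iommu_device_register() above is the
 * real API being demonstrated.
 *
 *      static int foo_iommu_probe(struct platform_device *pdev)
 *      {
 *              struct foo_iommu *foo;
 *
 *              foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *              if (!foo)
 *                      return -ENOMEM;
 *
 *              return iommu_device_register(&foo->iommu, &foo_iommu_ops,
 *                                           &pdev->dev);
 *      }
 */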
static struct dev_iommu *dev_iommu_get(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        if (param)
                return param;

        param = kzalloc(sizeof(*param), GFP_KERNEL);
        if (!param)
                return NULL;

        mutex_init(&param->lock);
        dev->iommu = param;
        return param;
}

static void dev_iommu_free(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        dev->iommu = NULL;
        if (param->fwspec) {
                fwnode_handle_put(param->fwspec->iommu_fwnode);
                kfree(param->fwspec);
        }
        kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
        u32 max_pasids = 0, bits = 0;
        int ret;

        if (dev_is_pci(dev)) {
                ret = pci_max_pasids(to_pci_dev(dev));
                if (ret > 0)
                        max_pasids = ret;
        } else {
                ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
                if (!ret)
                        max_pasids = 1UL << bits;
        }

        return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_device *iommu_dev;
        struct iommu_group *group;
        static DEFINE_MUTEX(iommu_probe_device_lock);
        int ret;

        if (!ops)
                return -ENODEV;
        /*
         * Serialise to avoid races between IOMMU drivers registering in
         * parallel and/or the "replay" calls from ACPI/OF code via client
         * driver probe. Once the latter have been cleaned up we should
         * probably be able to use device_lock() here to minimise the scope,
         * but for now enforcing a simple global ordering is fine.
         */
        mutex_lock(&iommu_probe_device_lock);
        if (!dev_iommu_get(dev)) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        if (!try_module_get(ops->owner)) {
                ret = -EINVAL;
                goto err_free;
        }

        iommu_dev = ops->probe_device(dev);
        if (IS_ERR(iommu_dev)) {
                ret = PTR_ERR(iommu_dev);
                goto out_module_put;
        }

        dev->iommu->iommu_dev = iommu_dev;
        dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group)) {
                ret = PTR_ERR(group);
                goto out_release;
        }

        mutex_lock(&group->mutex);
        if (group_list && !group->default_domain && list_empty(&group->entry))
                list_add_tail(&group->entry, group_list);
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        mutex_unlock(&iommu_probe_device_lock);
        iommu_device_link(iommu_dev, dev);

        return 0;

out_release:
        if (ops->release_device)
                ops->release_device(dev);

out_module_put:
        module_put(ops->owner);

err_free:
        dev_iommu_free(dev);

err_unlock:
        mutex_unlock(&iommu_probe_device_lock);

        return ret;
}

static bool iommu_is_attach_deferred(struct device *dev)
{
        const struct iommu_ops *ops = dev_iommu_ops(dev);

        if (ops->is_attach_deferred)
                return ops->is_attach_deferred(dev);

        return false;
}

static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        lockdep_assert_held(&dev->iommu_group->mutex);

        if (iommu_is_attach_deferred(dev)) {
                dev->iommu->attach_deferred = 1;
                return 0;
        }

        return __iommu_attach_device(domain, dev);
}

int iommu_probe_device(struct device *dev)
{
        const struct iommu_ops *ops;
        struct iommu_group *group;
        int ret;

        ret = __iommu_probe_device(dev, NULL);
        if (ret)
                goto err_out;

        group = iommu_group_get(dev);
        if (!group) {
                ret = -ENODEV;
                goto err_release;
        }

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver. There are still some drivers which don't
         * support default domains, so the return value is not yet
         * checked.
         */
        mutex_lock(&group->mutex);
        iommu_alloc_default_domain(group, dev);

        /*
         * If device joined an existing group which has been claimed, don't
         * attach the default domain.
         */
        if (group->default_domain && !group->owner) {
                ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
                if (ret) {
                        mutex_unlock(&group->mutex);
                        iommu_group_put(group);
                        goto err_release;
                }
        }

        iommu_create_device_direct_mappings(group, dev);

        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        ops = dev_iommu_ops(dev);
        if (ops->probe_finalize)
                ops->probe_finalize(dev);

        return 0;

err_release:
        iommu_release_device(dev);

err_out:
        return ret;
}

void iommu_release_device(struct device *dev)
{
        const struct iommu_ops *ops;

        if (!dev->iommu)
                return;

        iommu_device_unlink(dev->iommu->iommu_dev, dev);

        ops = dev_iommu_ops(dev);
        if (ops->release_device)
                ops->release_device(dev);

        iommu_group_remove_device(dev);
        module_put(ops->owner);
        dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;
        int ret;

        ret = kstrtobool(str, &pt);
        if (ret)
                return ret;

        if (pt)
                iommu_set_default_passthrough(true);
        else
                iommu_set_default_translated(true);

        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
        int ret = kstrtobool(str, &iommu_dma_strict);

        if (!ret)
                iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
        return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
        iommu_dma_strict = true;
        if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
                iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
                                    struct list_head *regions)
{
        struct iommu_resv_region *iter, *tmp, *nr, *top;
        LIST_HEAD(stack);

        nr = iommu_alloc_resv_region(new->start, new->length,
                                     new->prot, new->type, GFP_KERNEL);
        if (!nr)
                return -ENOMEM;

        /* First add the new element based on start address sorting */
        list_for_each_entry(iter, regions, list) {
                if (nr->start < iter->start ||
                    (nr->start == iter->start && nr->type <= iter->type))
                        break;
        }
        list_add_tail(&nr->list, &iter->list);

        /* Merge overlapping segments of type nr->type in @regions, if any */
        list_for_each_entry_safe(iter, tmp, regions, list) {
                phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

                /* no merge needed on elements of different types than @new */
                if (iter->type != new->type) {
                        list_move_tail(&iter->list, &stack);
                        continue;
                }

                /* look for the last stack element of same type as @iter */
                list_for_each_entry_reverse(top, &stack, list)
                        if (top->type == iter->type)
                                goto check_overlap;

                list_move_tail(&iter->list, &stack);
                continue;

check_overlap:
                top_end = top->start + top->length - 1;

                if (iter->start > top_end + 1) {
                        list_move_tail(&iter->list, &stack);
                } else {
                        top->length = max(top_end, iter_end) - top->start + 1;
                        list_del(&iter->list);
                        kfree(iter);
                }
        }
        list_splice(&stack, regions);
        return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
                                 struct list_head *group_resv_regions)
{
        struct iommu_resv_region *entry;
        int ret = 0;

        list_for_each_entry(entry, dev_resv_regions, list) {
                ret = iommu_insert_resv_region(entry, group_resv_regions);
                if (ret)
                        break;
        }
        return ret;
}
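/*
 * Worked example for the merge above (illustrative values): inserting a
 * "direct" region spanning [0x1000, 0x2fff] into a list that already holds
 * a "direct" region [0x2000, 0x3fff] leaves a single "direct" region
 * [0x1000, 0x3fff]; an overlapping "reserved" region at the same addresses
 * would be kept separate, since only same-type segments are merged.
 */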
int iommu_get_group_resv_regions(struct iommu_group *group,
                                 struct list_head *head)
{
        struct group_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                struct list_head dev_resv_regions;

                /*
                 * Non-API groups still expose reserved_regions in sysfs,
                 * so filter out calls that get here that way.
                 */
                if (!device->dev->iommu)
                        break;

                INIT_LIST_HEAD(&dev_resv_regions);
                iommu_get_resv_regions(device->dev, &dev_resv_regions);
                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
                iommu_put_resv_regions(device->dev, &dev_resv_regions);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
                                             char *buf)
{
        struct iommu_resv_region *region, *next;
        struct list_head group_resv_regions;
        char *str = buf;

        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);

        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
                               (long long int)region->start,
                               (long long int)(region->start +
                                                region->length - 1),
                               iommu_group_resv_type_string[region->type]);
                kfree(region);
        }

        return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
                                     char *buf)
{
        char *type = "unknown\n";

        mutex_lock(&group->mutex);
        if (group->default_domain) {
                switch (group->default_domain->type) {
                case IOMMU_DOMAIN_BLOCKED:
                        type = "blocked\n";
                        break;
                case IOMMU_DOMAIN_IDENTITY:
                        type = "identity\n";
                        break;
                case IOMMU_DOMAIN_UNMANAGED:
                        type = "unmanaged\n";
                        break;
                case IOMMU_DOMAIN_DMA:
                        type = "DMA\n";
                        break;
                case IOMMU_DOMAIN_DMA_FQ:
                        type = "DMA-FQ\n";
                        break;
                }
        }
        mutex_unlock(&group->mutex);
        strcpy(buf, type);

        return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
                        iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
                        iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_free(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        if (group->blocking_domain)
                iommu_domain_free(group->blocking_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        INIT_LIST_HEAD(&group->entry);
        xa_init(&group->pasid_array);

        ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                kobject_put(&group->kobj);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group. We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        ret = iommu_group_create_file(group,
                                      &iommu_group_attr_reserved_regions);
        if (ret)
                return ERR_PTR(ret);

        ret = iommu_group_create_file(group, &iommu_group_attr_type);
        if (ret)
                return ERR_PTR(ret);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
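/*
 * Illustrative ->device_group() sketch (hypothetical driver; all foo_*
 * names are assumptions): a driver either shares a group it already holds
 * a reference to, or allocates a fresh one:
 *
 *      static struct iommu_group *foo_device_group(struct device *dev)
 *      {
 *              if (foo_dev_shares_translation(dev))
 *                      return iommu_group_ref_get(foo_shared_group);
 *
 *              return iommu_group_alloc();
 *      }
 *
 * generic_device_group() and pci_device_group() further below implement
 * the two common policies.
 */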
struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
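/*
 * Illustrative call (sketch): VFIO's no-IOMMU mode has used this to label
 * its groups, e.g. iommu_group_set_name(group, "vfio-noiommu"). Whether a
 * given tree still does so is not guaranteed.
 */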
static int iommu_create_device_direct_mappings(struct iommu_group *group,
                                               struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_resv_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || !iommu_is_dma_domain(domain))
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_resv_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;
                size_t map_size = 0;

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                if (entry->type != IOMMU_RESV_DIRECT &&
                    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;

                for (addr = start; addr <= end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        if (addr == end)
                                goto map_end;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (!phys_addr) {
                                map_size += pg_size;
                                continue;
                        }

map_end:
                        if (map_size) {
                                ret = iommu_map(domain, addr - map_size,
                                                addr - map_size, map_size,
                                                entry->prot);
                                if (ret)
                                        goto out;
                                map_size = 0;
                        }
                }
        }

        iommu_flush_iotlb_all(domain);

out:
        iommu_put_resv_regions(dev, &mappings);

        return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct group_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret)
                goto err_free_device;

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                ret = -ENOMEM;
                goto err_remove_link;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
                goto err_free_name;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
                ret = iommu_group_do_dma_first_attach(dev, group->domain);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;

        trace_add_device_to_group(group->id, dev);

        dev_info(dev, "Adding to iommu group %d\n", group->id);

        return 0;

err_put_group:
        mutex_lock(&group->mutex);
        list_del(&device->list);
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
        sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
        kfree(device->name);
err_remove_link:
        sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
        kfree(device);
        dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;

        if (!group)
                return;

        dev_info(dev, "Removing from iommu group %d\n", group->id);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct group_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct group_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
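/*
 * Illustrative callback sketch (hypothetical; the foo_* name is an
 * assumption): count the devices in a group. Returning non-zero from the
 * callback stops the walk early.
 *
 *      static int foo_count_device(struct device *dev, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *      iommu_group_for_each_dev(group, &count, foo_count_device);
 *
 * The callback must not call iommu_group_add_device() or
 * iommu_group_remove_device(), since group->mutex is held across the walk.
 */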
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);
        /* Only allow one fault handler registered for each device */
        if (param->fault_param) {
                ret = -EBUSY;
                goto done_unlock;
        }

        get_device(dev);
        param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
        if (!param->fault_param) {
                put_device(dev);
                ret = -ENOMEM;
                goto done_unlock;
        }
        param->fault_param->handler = handler;
        param->fault_param->data = data;
        mutex_init(&param->fault_param->lock);
        INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);

        if (!param->fault_param)
                goto unlock;

        /* we cannot unregister handler if there are pending faults */
        if (!list_empty(&param->fault_param->faults)) {
                ret = -EBUSY;
                goto unlock;
        }

        kfree(param->fault_param);
        param->fault_param = NULL;
        put_device(dev);
unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
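/*
 * Illustrative registration sketch (hypothetical consumer; foo_* names are
 * assumptions): a handler that defers recoverable faults to its own
 * queueing machinery and rejects everything else:
 *
 *      static int foo_iopf_handler(struct iommu_fault *fault, void *data)
 *      {
 *              struct foo_ctx *ctx = data;
 *
 *              if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *                      return -EOPNOTSUPP;
 *
 *              return foo_queue_fault(ctx, fault);
 *      }
 *
 *      ret = iommu_register_device_fault_handler(dev, foo_iopf_handler, ctx);
 *
 * A recoverable fault accepted this way must later be completed with
 * iommu_page_response(), see below.
 */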
/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
        struct dev_iommu *param = dev->iommu;
        struct iommu_fault_event *evt_pending = NULL;
        struct iommu_fault_param *fparam;
        int ret = 0;

        if (!param || !evt)
                return -EINVAL;

        /* we only report device fault if there is a handler registered */
        mutex_lock(&param->lock);
        fparam = param->fault_param;
        if (!fparam || !fparam->handler) {
                ret = -EINVAL;
                goto done_unlock;
        }

        if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
            (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
                evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
                                      GFP_KERNEL);
                if (!evt_pending) {
                        ret = -ENOMEM;
                        goto done_unlock;
                }
                mutex_lock(&fparam->lock);
                list_add_tail(&evt_pending->list, &fparam->faults);
                mutex_unlock(&fparam->lock);
        }

        ret = fparam->handler(&evt->fault, fparam->data);
        if (ret && evt_pending) {
                mutex_lock(&fparam->lock);
                list_del(&evt_pending->list);
                mutex_unlock(&fparam->lock);
                kfree(evt_pending);
        }
done_unlock:
        mutex_unlock(&param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
                        struct iommu_page_response *msg)
{
        bool needs_pasid;
        int ret = -EINVAL;
        struct iommu_fault_event *evt;
        struct iommu_fault_page_request *prm;
        struct dev_iommu *param = dev->iommu;
        const struct iommu_ops *ops = dev_iommu_ops(dev);
        bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

        if (!ops->page_response)
                return -ENODEV;

        if (!param || !param->fault_param)
                return -EINVAL;

        if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
            msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
                return -EINVAL;

        /* Only send response if there is a fault report pending */
        mutex_lock(&param->fault_param->lock);
        if (list_empty(&param->fault_param->faults)) {
                dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
                goto done_unlock;
        }
        /*
         * Check if we have a matching page request pending to respond,
         * otherwise return -EINVAL
         */
        list_for_each_entry(evt, &param->fault_param->faults, list) {
                prm = &evt->fault.prm;
                if (prm->grpid != msg->grpid)
                        continue;

                /*
                 * If the PASID is required, the corresponding request is
                 * matched using the group ID, the PASID valid bit and the PASID
                 * value. Otherwise only the group ID matches request and
                 * response.
                 */
                needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
                if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
                        continue;

                if (!needs_pasid && has_pasid) {
                        /* No big deal, just clear it. */
                        msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
                        msg->pasid = 0;
                }

                ret = ops->page_response(dev, evt, msg);
                list_del(&evt->list);
                kfree(evt);
                break;
        }

done_unlock:
        mutex_unlock(&param->fault_param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
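/*
 * Illustrative completion of a recoverable fault (sketch; the response
 * fields simply echo the original fault's PRM fields):
 *
 *      struct iommu_page_response resp = {
 *              .version = IOMMU_PAGE_RESP_VERSION_1,
 *              .grpid   = fault->prm.grpid,
 *              .pasid   = fault->prm.pasid,
 *              .flags   = IOMMU_PAGE_RESP_PASID_VALID,
 *              .code    = IOMMU_PAGE_RESP_SUCCESS,
 *      };
 *
 *      iommu_page_response(dev, &resp);
 */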
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even when they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }

                        group = get_pci_function_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
        return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device. A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS. Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases. If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any. No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
        struct device *cont_dev = fsl_mc_cont_dev(dev);
        struct iommu_group *group;

        group = iommu_group_get(cont_dev);
        if (!group)
                group = iommu_group_alloc();
        return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
        const struct iommu_ops *ops = dev_iommu_ops(dev);

        if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
                return IOMMU_DOMAIN_DMA;

        if (ops->def_domain_type)
                return ops->def_domain_type(dev);

        return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
                                            struct iommu_group *group,
                                            unsigned int type)
{
        struct iommu_domain *dom;

        dom = __iommu_domain_alloc(bus, type);
        if (!dom && type != IOMMU_DOMAIN_DMA) {
                dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
                if (dom)
                        pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
                                type, group->name);
        }

        if (!dom)
                return -ENOMEM;

        group->default_domain = dom;
        if (!group->domain)
                group->domain = dom;
        return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
                                      struct device *dev)
{
        unsigned int type;

        if (group->default_domain)
                return 0;

        type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

        return iommu_group_alloc_default_domain(dev->bus, group, type);
}
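/*
 * Illustrative driver hook (hypothetical; foo_* names are assumptions):
 * a driver can pin devices to a specific default domain type through
 * ops->def_domain_type, e.g. to keep a quirky device on an identity map:
 *
 *      static int foo_def_domain_type(struct device *dev)
 *      {
 *              if (foo_needs_identity_map(dev))
 *                      return IOMMU_DOMAIN_IDENTITY;
 *
 *              return 0;       (no preference - use the global default)
 *      }
 */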
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev_iommu_ops(dev);
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        group = ops->device_group(dev);
        if (WARN_ON_ONCE(group == NULL))
                return ERR_PTR(-EINVAL);

        if (IS_ERR(group))
                return group;

        ret = iommu_group_add_device(group, dev);
        if (ret)
                goto out_put_group;

        return group;

out_put_group:
        iommu_group_put(group);

        return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
        struct list_head *group_list = data;
        struct iommu_group *group;
        int ret;

        /* Device is probed already if in a group */
        group = iommu_group_get(dev);
        if (group) {
                iommu_group_put(group);
                return 0;
        }

        ret = __iommu_probe_device(dev, group_list);
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                int ret;

                ret = iommu_probe_device(dev);
                return (ret) ? NOTIFY_DONE : NOTIFY_OK;
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                iommu_release_device(dev);
                return NOTIFY_OK;
        }

        return 0;
}

struct __group_domain_type {
        struct device *dev;
        unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
        struct __group_domain_type *gtype = data;
        unsigned int type = iommu_get_def_domain_type(dev);

        if (type) {
                if (gtype->type && gtype->type != type) {
                        dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
                                 iommu_domain_type_str(type),
                                 dev_name(gtype->dev),
                                 iommu_domain_type_str(gtype->type));
                        gtype->type = 0;
                }

                if (!gtype->dev) {
                        gtype->dev  = dev;
                        gtype->type = type;
                }
        }

        return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
                                       struct iommu_group *group)
{
        struct __group_domain_type gtype;

        memset(&gtype, 0, sizeof(gtype));

        /* Ask for default domain requirements of all devices in the group */
        __iommu_group_for_each_dev(group, &gtype,
                                   probe_get_default_domain_type);

        if (!gtype.type)
                gtype.type = iommu_def_domain_type;

        iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int __iommu_group_dma_first_attach(struct iommu_group *group)
{
        return __iommu_group_for_each_dev(group, group->default_domain,
                                          iommu_group_do_dma_first_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
        const struct iommu_ops *ops = dev_iommu_ops(dev);

        if (ops->probe_finalize)
                ops->probe_finalize(dev);

        return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
        __iommu_group_for_each_dev(group, group->default_domain,
                                   iommu_group_do_probe_finalize);
}
static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
        struct iommu_group *group = data;

        iommu_create_device_direct_mappings(group, dev);

        return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
        return __iommu_group_for_each_dev(group, group,
                                          iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
        struct iommu_group *group, *next;
        LIST_HEAD(group_list);
        int ret;

        /*
         * This code-path does not allocate the default domain when
         * creating the iommu group, so do it after the groups are
         * created.
         */
        ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
        if (ret)
                return ret;

        list_for_each_entry_safe(group, next, &group_list, entry) {
                mutex_lock(&group->mutex);

                /* Remove item from the list */
                list_del_init(&group->entry);

                /* Try to allocate default domain */
                probe_alloc_default_domain(bus, group);

                if (!group->default_domain) {
                        mutex_unlock(&group->mutex);
                        continue;
                }

                iommu_group_create_direct_mappings(group);

                ret = __iommu_group_dma_first_attach(group);

                mutex_unlock(&group->mutex);

                if (ret)
                        break;

                __iommu_group_dma_finalize(group);
        }

        return ret;
}

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
        const struct iommu_ops *ops;

        if (!dev->iommu || !dev->iommu->iommu_dev)
                return false;

        ops = dev_iommu_ops(dev);
        if (!ops->capable)
                return false;

        return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
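/*
 * Illustrative check (sketch): users such as VFIO gate features on IOMMU
 * capabilities before exposing a device, e.g.:
 *
 *      if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *              return -EPERM;
 */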
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler,
                             void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->type = type;
        /* Assume all sizes by default; the driver may override this later */
        domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
        if (!domain->ops)
                domain->ops = bus->iommu_ops->default_domain_ops;

        if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
                iommu_domain_free(domain);
                domain = NULL;
        }
        return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (domain->type == IOMMU_DOMAIN_SVA)
                mmdrop(domain->mm);
        iommu_put_dma_cookie(domain);
        domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
        struct iommu_domain *new_domain;
        int ret;

        if (group->owner)
                new_domain = group->blocking_domain;
        else
                new_domain = group->default_domain;

        ret = __iommu_group_set_domain(group, new_domain);
        WARN(ret, "iommu driver failed to attach the default/blocking domain");
}

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        int ret;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        ret = domain->ops->attach_dev(domain, dev);
        if (ret)
                return ret;
        dev->iommu->attach_deferred = 0;
        trace_attach_device_to_domain(dev);
        return 0;
}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;

        /*
         * Lock the group to make sure the device-count doesn't
         * change while we are attaching
         */
        mutex_lock(&group->mutex);
        ret = -EINVAL;
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        ret = __iommu_attach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
        if (dev->iommu && dev->iommu->attach_deferred)
                return __iommu_attach_device(domain, dev);

        return 0;
}

static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return;

        mutex_lock(&group->mutex);
        if (WARN_ON(domain != group->domain) ||
            WARN_ON(iommu_group_device_count(group) != 1))
                goto out_unlock;
        __iommu_group_set_core_domain(group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
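/*
 * Illustrative lifecycle of an unmanaged domain (sketch; error handling
 * elided, and the device is assumed to be alone in its group, as
 * iommu_attach_device() requires):
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *      iommu_attach_device(domain, dev);
 *      iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *      ...
 *      iommu_unmap(domain, iova, SZ_4K);
 *      iommu_detach_device(domain, dev);
 *      iommu_domain_free(domain);
 */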
2110 */ 2111 static int iommu_group_do_attach_device(struct device *dev, void *data) 2112 { 2113 struct iommu_domain *domain = data; 2114 2115 return __iommu_attach_device(domain, dev); 2116 } 2117 2118 static int __iommu_attach_group(struct iommu_domain *domain, 2119 struct iommu_group *group) 2120 { 2121 int ret; 2122 2123 if (group->domain && group->domain != group->default_domain && 2124 group->domain != group->blocking_domain) 2125 return -EBUSY; 2126 2127 ret = __iommu_group_for_each_dev(group, domain, 2128 iommu_group_do_attach_device); 2129 if (ret == 0) 2130 group->domain = domain; 2131 2132 return ret; 2133 } 2134 2135 /** 2136 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group 2137 * @domain: IOMMU domain to attach 2138 * @group: IOMMU group that will be attached 2139 * 2140 * Returns 0 on success and error code on failure 2141 * 2142 * Note that EINVAL can be treated as a soft failure, indicating 2143 * that certain configuration of the domain is incompatible with 2144 * the group. In this case attaching a different domain to the 2145 * group may succeed. 2146 */ 2147 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2148 { 2149 int ret; 2150 2151 mutex_lock(&group->mutex); 2152 ret = __iommu_attach_group(domain, group); 2153 mutex_unlock(&group->mutex); 2154 2155 return ret; 2156 } 2157 EXPORT_SYMBOL_GPL(iommu_attach_group); 2158 2159 static int iommu_group_do_detach_device(struct device *dev, void *data) 2160 { 2161 struct iommu_domain *domain = data; 2162 2163 __iommu_detach_device(domain, dev); 2164 2165 return 0; 2166 } 2167 2168 static int iommu_group_do_set_platform_dma(struct device *dev, void *data) 2169 { 2170 const struct iommu_ops *ops = dev_iommu_ops(dev); 2171 2172 if (!WARN_ON(!ops->set_platform_dma_ops)) 2173 ops->set_platform_dma_ops(dev); 2174 2175 return 0; 2176 } 2177 2178 static int __iommu_group_set_domain(struct iommu_group *group, 2179 struct iommu_domain *new_domain) 2180 { 2181 int ret; 2182 2183 if (group->domain == new_domain) 2184 return 0; 2185 2186 /* 2187 * New drivers should support default domains and so the detach_dev() op 2188 * will never be called. Otherwise the NULL domain represents some 2189 * platform specific behavior. 2190 */ 2191 if (!new_domain) { 2192 struct group_device *grp_dev; 2193 2194 grp_dev = list_first_entry(&group->devices, 2195 struct group_device, list); 2196 2197 if (dev_iommu_ops(grp_dev->dev)->set_platform_dma_ops) 2198 __iommu_group_for_each_dev(group, NULL, 2199 iommu_group_do_set_platform_dma); 2200 else if (group->domain->ops->detach_dev) 2201 __iommu_group_for_each_dev(group, group->domain, 2202 iommu_group_do_detach_device); 2203 else 2204 WARN_ON_ONCE(1); 2205 2206 group->domain = NULL; 2207 return 0; 2208 } 2209 2210 /* 2211 * Changing the domain is done by calling attach_dev() on the new 2212 * domain. This switch does not have to be atomic and DMA can be 2213 * discarded during the transition. DMA must only be able to access 2214 * either new_domain or group->domain, never something else. 2215 * 2216 * Note that this is called in error unwind paths; attaching to a 2217 * domain that has already been attached cannot fail.
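 *
 * For instance (an illustrative sketch), an unwind path may rely on that
 * guarantee and merely warn instead of handling a failure:
 *
 *	WARN_ON(__iommu_group_set_domain(group, group->default_domain));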
2218 */ 2219 ret = __iommu_group_for_each_dev(group, new_domain, 2220 iommu_group_do_attach_device); 2221 if (ret) 2222 return ret; 2223 group->domain = new_domain; 2224 return 0; 2225 } 2226 2227 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2228 { 2229 mutex_lock(&group->mutex); 2230 __iommu_group_set_core_domain(group); 2231 mutex_unlock(&group->mutex); 2232 } 2233 EXPORT_SYMBOL_GPL(iommu_detach_group); 2234 2235 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2236 { 2237 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2238 return iova; 2239 2240 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2241 return 0; 2242 2243 return domain->ops->iova_to_phys(domain, iova); 2244 } 2245 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2246 2247 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2248 phys_addr_t paddr, size_t size, size_t *count) 2249 { 2250 unsigned int pgsize_idx, pgsize_idx_next; 2251 unsigned long pgsizes; 2252 size_t offset, pgsize, pgsize_next; 2253 unsigned long addr_merge = paddr | iova; 2254 2255 /* Page sizes supported by the hardware and small enough for @size */ 2256 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2257 2258 /* Constrain the page sizes further based on the maximum alignment */ 2259 if (likely(addr_merge)) 2260 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2261 2262 /* Make sure we have at least one suitable page size */ 2263 BUG_ON(!pgsizes); 2264 2265 /* Pick the biggest page size remaining */ 2266 pgsize_idx = __fls(pgsizes); 2267 pgsize = BIT(pgsize_idx); 2268 if (!count) 2269 return pgsize; 2270 2271 /* Find the next biggest supported page size, if it exists */ 2272 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2273 if (!pgsizes) 2274 goto out_set_count; 2275 2276 pgsize_idx_next = __ffs(pgsizes); 2277 pgsize_next = BIT(pgsize_idx_next); 2278 2279 /* 2280 * There's no point trying a bigger page size unless the virtual 2281 * and physical addresses are similarly offset within the larger page. 2282 */ 2283 if ((iova ^ paddr) & (pgsize_next - 1)) 2284 goto out_set_count; 2285 2286 /* Calculate the offset to the next page size alignment boundary */ 2287 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2288 2289 /* 2290 * If size is big enough to accommodate the larger page, reduce 2291 * the number of smaller pages. 2292 */ 2293 if (offset + pgsize_next <= size) 2294 size = offset; 2295 2296 out_set_count: 2297 *count = size >> pgsize_idx; 2298 return pgsize; 2299 } 2300 2301 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2302 phys_addr_t paddr, size_t size, int prot, 2303 gfp_t gfp, size_t *mapped) 2304 { 2305 const struct iommu_domain_ops *ops = domain->ops; 2306 size_t pgsize, count; 2307 int ret; 2308 2309 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2310 2311 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2312 iova, &paddr, pgsize, count); 2313 2314 if (ops->map_pages) { 2315 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2316 gfp, mapped); 2317 } else { 2318 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2319 *mapped = ret ?
0 : pgsize; 2320 } 2321 2322 return ret; 2323 } 2324 2325 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2326 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2327 { 2328 const struct iommu_domain_ops *ops = domain->ops; 2329 unsigned long orig_iova = iova; 2330 unsigned int min_pagesz; 2331 size_t orig_size = size; 2332 phys_addr_t orig_paddr = paddr; 2333 int ret = 0; 2334 2335 if (unlikely(!(ops->map || ops->map_pages) || 2336 domain->pgsize_bitmap == 0UL)) 2337 return -ENODEV; 2338 2339 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2340 return -EINVAL; 2341 2342 /* find out the minimum page size supported */ 2343 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2344 2345 /* 2346 * both the virtual address and the physical one, as well as 2347 * the size of the mapping, must be aligned (at least) to the 2348 * size of the smallest page supported by the hardware 2349 */ 2350 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2351 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2352 iova, &paddr, size, min_pagesz); 2353 return -EINVAL; 2354 } 2355 2356 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2357 2358 while (size) { 2359 size_t mapped = 0; 2360 2361 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2362 &mapped); 2363 /* 2364 * Some pages may have been mapped, even if an error occurred, 2365 * so we should account for those so they can be unmapped. 2366 */ 2367 size -= mapped; 2368 2369 if (ret) 2370 break; 2371 2372 iova += mapped; 2373 paddr += mapped; 2374 } 2375 2376 /* unroll mapping in case something went wrong */ 2377 if (ret) 2378 iommu_unmap(domain, orig_iova, orig_size - size); 2379 else 2380 trace_map(orig_iova, orig_paddr, orig_size); 2381 2382 return ret; 2383 } 2384 2385 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, 2386 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2387 { 2388 const struct iommu_domain_ops *ops = domain->ops; 2389 int ret; 2390 2391 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2392 if (ret == 0 && ops->iotlb_sync_map) 2393 ops->iotlb_sync_map(domain, iova, size); 2394 2395 return ret; 2396 } 2397 2398 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2399 phys_addr_t paddr, size_t size, int prot) 2400 { 2401 might_sleep(); 2402 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2403 } 2404 EXPORT_SYMBOL_GPL(iommu_map); 2405 2406 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2407 phys_addr_t paddr, size_t size, int prot) 2408 { 2409 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2410 } 2411 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2412 2413 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2414 unsigned long iova, size_t size, 2415 struct iommu_iotlb_gather *iotlb_gather) 2416 { 2417 const struct iommu_domain_ops *ops = domain->ops; 2418 size_t pgsize, count; 2419 2420 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2421 return ops->unmap_pages ? 
2422 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2423 ops->unmap(domain, iova, pgsize, iotlb_gather); 2424 } 2425 2426 static size_t __iommu_unmap(struct iommu_domain *domain, 2427 unsigned long iova, size_t size, 2428 struct iommu_iotlb_gather *iotlb_gather) 2429 { 2430 const struct iommu_domain_ops *ops = domain->ops; 2431 size_t unmapped_page, unmapped = 0; 2432 unsigned long orig_iova = iova; 2433 unsigned int min_pagesz; 2434 2435 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2436 domain->pgsize_bitmap == 0UL)) 2437 return 0; 2438 2439 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2440 return 0; 2441 2442 /* find out the minimum page size supported */ 2443 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2444 2445 /* 2446 * The virtual address, as well as the size of the mapping, must be 2447 * aligned (at least) to the size of the smallest page supported 2448 * by the hardware 2449 */ 2450 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2451 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2452 iova, size, min_pagesz); 2453 return 0; 2454 } 2455 2456 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2457 2458 /* 2459 * Keep iterating until we either unmap 'size' bytes (or more) 2460 * or we hit an area that isn't mapped. 2461 */ 2462 while (unmapped < size) { 2463 unmapped_page = __iommu_unmap_pages(domain, iova, 2464 size - unmapped, 2465 iotlb_gather); 2466 if (!unmapped_page) 2467 break; 2468 2469 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2470 iova, unmapped_page); 2471 2472 iova += unmapped_page; 2473 unmapped += unmapped_page; 2474 } 2475 2476 trace_unmap(orig_iova, size, unmapped); 2477 return unmapped; 2478 } 2479 2480 size_t iommu_unmap(struct iommu_domain *domain, 2481 unsigned long iova, size_t size) 2482 { 2483 struct iommu_iotlb_gather iotlb_gather; 2484 size_t ret; 2485 2486 iommu_iotlb_gather_init(&iotlb_gather); 2487 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2488 iommu_iotlb_sync(domain, &iotlb_gather); 2489 2490 return ret; 2491 } 2492 EXPORT_SYMBOL_GPL(iommu_unmap); 2493 2494 size_t iommu_unmap_fast(struct iommu_domain *domain, 2495 unsigned long iova, size_t size, 2496 struct iommu_iotlb_gather *iotlb_gather) 2497 { 2498 return __iommu_unmap(domain, iova, size, iotlb_gather); 2499 } 2500 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2501 2502 static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2503 struct scatterlist *sg, unsigned int nents, int prot, 2504 gfp_t gfp) 2505 { 2506 const struct iommu_domain_ops *ops = domain->ops; 2507 size_t len = 0, mapped = 0; 2508 phys_addr_t start; 2509 unsigned int i = 0; 2510 int ret; 2511 2512 while (i <= nents) { 2513 phys_addr_t s_phys = sg_phys(sg); 2514 2515 if (len && s_phys != start + len) { 2516 ret = __iommu_map(domain, iova + mapped, start, 2517 len, prot, gfp); 2518 2519 if (ret) 2520 goto out_err; 2521 2522 mapped += len; 2523 len = 0; 2524 } 2525 2526 if (sg_is_dma_bus_address(sg)) 2527 goto next; 2528 2529 if (len) { 2530 len += sg->length; 2531 } else { 2532 len = sg->length; 2533 start = s_phys; 2534 } 2535 2536 next: 2537 if (++i < nents) 2538 sg = sg_next(sg); 2539 } 2540 2541 if (ops->iotlb_sync_map) 2542 ops->iotlb_sync_map(domain, iova, mapped); 2543 return mapped; 2544 2545 out_err: 2546 /* undo mappings already done */ 2547 iommu_unmap(domain, iova, mapped); 2548 2549 return ret; 2550 } 2551 2552 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2553 struct scatterlist *sg, unsigned int nents, int prot) 
2554 { 2555 might_sleep(); 2556 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); 2557 } 2558 EXPORT_SYMBOL_GPL(iommu_map_sg); 2559 2560 ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2561 struct scatterlist *sg, unsigned int nents, int prot) 2562 { 2563 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); 2564 } 2565 2566 /** 2567 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2568 * @domain: the iommu domain where the fault has happened 2569 * @dev: the device where the fault has happened 2570 * @iova: the faulting address 2571 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2572 * 2573 * This function should be called by the low-level IOMMU implementations 2574 * whenever IOMMU faults happen, to allow high-level users that are 2575 * interested in such events to know about them. 2576 * 2577 * This event may be useful for several possible use cases: 2578 * - mere logging of the event 2579 * - dynamic TLB/PTE loading 2580 * - restarting the faulting device, if required 2581 * 2582 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2583 * PTE/TLB loading will one day be supported, implementations will be able 2584 * to tell whether it succeeded or not according to this return value). 2585 * 2586 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2587 * (though fault handlers can also return -ENOSYS, in case they want to 2588 * elicit the default behavior of the IOMMU drivers). 2589 */ 2590 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2591 unsigned long iova, int flags) 2592 { 2593 int ret = -ENOSYS; 2594 2595 /* 2596 * if upper layers showed interest and installed a fault handler, 2597 * invoke it. 2598 */ 2599 if (domain->handler) 2600 ret = domain->handler(domain, dev, iova, flags, 2601 domain->handler_token); 2602 2603 trace_io_page_fault(dev, iova, flags); 2604 return ret; 2605 } 2606 EXPORT_SYMBOL_GPL(report_iommu_fault); 2607 2608 static int __init iommu_init(void) 2609 { 2610 iommu_group_kset = kset_create_and_add("iommu_groups", 2611 NULL, kernel_kobj); 2612 BUG_ON(!iommu_group_kset); 2613 2614 iommu_debugfs_setup(); 2615 2616 return 0; 2617 } 2618 core_initcall(iommu_init); 2619 2620 int iommu_enable_nesting(struct iommu_domain *domain) 2621 { 2622 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2623 return -EINVAL; 2624 if (!domain->ops->enable_nesting) 2625 return -EINVAL; 2626 return domain->ops->enable_nesting(domain); 2627 } 2628 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2629 2630 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2631 unsigned long quirk) 2632 { 2633 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2634 return -EINVAL; 2635 if (!domain->ops->set_pgtable_quirks) 2636 return -EINVAL; 2637 return domain->ops->set_pgtable_quirks(domain, quirk); 2638 } 2639 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2640 2641 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2642 { 2643 const struct iommu_ops *ops = dev_iommu_ops(dev); 2644 2645 if (ops->get_resv_regions) 2646 ops->get_resv_regions(dev, list); 2647 } 2648 2649 /** 2650 * iommu_put_resv_regions - release reserved regions 2651 * @dev: device for which to free reserved regions 2652 * @list: reserved region list for device 2653 * 2654 * This releases a reserved region list acquired by iommu_get_resv_regions().
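 *
 * Typical pairing (an illustrative sketch, not from the original
 * documentation):
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list) {
 *		... inspect region->start, region->length, region->type ...
 *	}
 *	iommu_put_resv_regions(dev, &resv_regions);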
2655 */ 2656 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2657 { 2658 struct iommu_resv_region *entry, *next; 2659 2660 list_for_each_entry_safe(entry, next, list, list) { 2661 if (entry->free) 2662 entry->free(dev, entry); 2663 else 2664 kfree(entry); 2665 } 2666 } 2667 EXPORT_SYMBOL(iommu_put_resv_regions); 2668 2669 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2670 size_t length, int prot, 2671 enum iommu_resv_type type, 2672 gfp_t gfp) 2673 { 2674 struct iommu_resv_region *region; 2675 2676 region = kzalloc(sizeof(*region), gfp); 2677 if (!region) 2678 return NULL; 2679 2680 INIT_LIST_HEAD(&region->list); 2681 region->start = start; 2682 region->length = length; 2683 region->prot = prot; 2684 region->type = type; 2685 return region; 2686 } 2687 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2688 2689 void iommu_set_default_passthrough(bool cmd_line) 2690 { 2691 if (cmd_line) 2692 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2693 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2694 } 2695 2696 void iommu_set_default_translated(bool cmd_line) 2697 { 2698 if (cmd_line) 2699 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2700 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2701 } 2702 2703 bool iommu_default_passthrough(void) 2704 { 2705 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2706 } 2707 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2708 2709 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2710 { 2711 const struct iommu_ops *ops = NULL; 2712 struct iommu_device *iommu; 2713 2714 spin_lock(&iommu_device_lock); 2715 list_for_each_entry(iommu, &iommu_device_list, list) 2716 if (iommu->fwnode == fwnode) { 2717 ops = iommu->ops; 2718 break; 2719 } 2720 spin_unlock(&iommu_device_lock); 2721 return ops; 2722 } 2723 2724 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2725 const struct iommu_ops *ops) 2726 { 2727 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2728 2729 if (fwspec) 2730 return ops == fwspec->ops ? 0 : -EINVAL; 2731 2732 if (!dev_iommu_get(dev)) 2733 return -ENOMEM; 2734 2735 /* Preallocate for the overwhelmingly common case of 1 ID */ 2736 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2737 if (!fwspec) 2738 return -ENOMEM; 2739 2740 of_node_get(to_of_node(iommu_fwnode)); 2741 fwspec->iommu_fwnode = iommu_fwnode; 2742 fwspec->ops = ops; 2743 dev_iommu_fwspec_set(dev, fwspec); 2744 return 0; 2745 } 2746 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2747 2748 void iommu_fwspec_free(struct device *dev) 2749 { 2750 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2751 2752 if (fwspec) { 2753 fwnode_handle_put(fwspec->iommu_fwnode); 2754 kfree(fwspec); 2755 dev_iommu_fwspec_set(dev, NULL); 2756 } 2757 } 2758 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2759 2760 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2761 { 2762 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2763 int i, new_num; 2764 2765 if (!fwspec) 2766 return -EINVAL; 2767 2768 new_num = fwspec->num_ids + num_ids; 2769 if (new_num > 1) { 2770 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2771 GFP_KERNEL); 2772 if (!fwspec) 2773 return -ENOMEM; 2774 2775 dev_iommu_fwspec_set(dev, fwspec); 2776 } 2777 2778 for (i = 0; i < num_ids; i++) 2779 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2780 2781 fwspec->num_ids = new_num; 2782 return 0; 2783 } 2784 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2785 2786 /* 2787 * Per device IOMMU features.
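 *
 * A hedged usage sketch for the helpers below (whether a given feature is
 * supported depends entirely on the underlying IOMMU driver):
 *
 *	if (!iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		... make use of SVA ...
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	}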
2788 */ 2789 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2790 { 2791 if (dev->iommu && dev->iommu->iommu_dev) { 2792 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2793 2794 if (ops->dev_enable_feat) 2795 return ops->dev_enable_feat(dev, feat); 2796 } 2797 2798 return -ENODEV; 2799 } 2800 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2801 2802 /* 2803 * The device drivers should do the necessary cleanups before calling this. 2804 */ 2805 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2806 { 2807 if (dev->iommu && dev->iommu->iommu_dev) { 2808 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2809 2810 if (ops->dev_disable_feat) 2811 return ops->dev_disable_feat(dev, feat); 2812 } 2813 2814 return -EBUSY; 2815 } 2816 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2817 2818 /* 2819 * Changes the default domain of an iommu group that has *only* one device 2820 * 2821 * @group: The group for which the default domain should be changed 2822 * @prev_dev: The device in the group (this is used to make sure that the device 2823 * hasn't changed after the caller looked it up) 2824 * @type: The type of the new default domain that gets associated with the group 2825 * 2826 * Returns 0 on success and error code on failure 2827 * 2828 * Note: 2829 * 1. Presently, this function is called only when a user requests to change the 2830 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 2831 * Please take a closer look if you intend to use it for other purposes. 2832 */ 2833 static int iommu_change_dev_def_domain(struct iommu_group *group, 2834 struct device *prev_dev, int type) 2835 { 2836 struct iommu_domain *prev_dom; 2837 struct group_device *grp_dev; 2838 int ret, dev_def_dom; 2839 struct device *dev; 2840 2841 mutex_lock(&group->mutex); 2842 2843 if (group->default_domain != group->domain) { 2844 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 2845 ret = -EBUSY; 2846 goto out; 2847 } 2848 2849 /* 2850 * The iommu group wasn't locked while acquiring the device lock in 2851 * iommu_group_store_type(). So, make sure that the device count hasn't 2852 * changed while acquiring the device lock. 2853 * 2854 * Changing the default domain of an iommu group with two or more devices 2855 * isn't supported because there could be a potential deadlock. Consider 2856 * the following scenario. T1 is trying to acquire device locks of all 2857 * the devices in the group and before it could acquire all of them, 2858 * there could be another thread T2 (from a different sub-system and use 2859 * case) that has already acquired some of the device locks and might be 2860 * waiting for T1 to release other device locks.
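 *
 * An illustration of that interleaving (T1/T2 and the device names are
 * hypothetical):
 *
 *	T1: device_lock(devA);
 *	T2: device_lock(devB);
 *	T1: device_lock(devB);  <- blocks waiting for T2
 *	T2: device_lock(devA);  <- blocks waiting for T1: deadlock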
2861 */ 2862 if (iommu_group_device_count(group) != 1) { 2863 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); 2864 ret = -EINVAL; 2865 goto out; 2866 } 2867 2868 /* Since group has only one device */ 2869 grp_dev = list_first_entry(&group->devices, struct group_device, list); 2870 dev = grp_dev->dev; 2871 2872 if (prev_dev != dev) { 2873 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); 2874 ret = -EBUSY; 2875 goto out; 2876 } 2877 2878 prev_dom = group->default_domain; 2879 if (!prev_dom) { 2880 ret = -EINVAL; 2881 goto out; 2882 } 2883 2884 dev_def_dom = iommu_get_def_domain_type(dev); 2885 if (!type) { 2886 /* 2887 * If the user hasn't requested any specific type of domain and 2888 * if the device supports both domain types, then default to the 2889 * domain the device was booted with 2890 */ 2891 type = dev_def_dom ? : iommu_def_domain_type; 2892 } else if (dev_def_dom && type != dev_def_dom) { 2893 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", 2894 iommu_domain_type_str(type)); 2895 ret = -EINVAL; 2896 goto out; 2897 } 2898 2899 /* 2900 * Switch to a new domain only if the requested domain type is different 2901 * from the existing default domain type 2902 */ 2903 if (prev_dom->type == type) { 2904 ret = 0; 2905 goto out; 2906 } 2907 2908 /* We can bring up a flush queue without tearing down the domain */ 2909 if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) { 2910 ret = iommu_dma_init_fq(prev_dom); 2911 if (!ret) 2912 prev_dom->type = IOMMU_DOMAIN_DMA_FQ; 2913 goto out; 2914 } 2915 2916 /* Sets group->default_domain to the newly allocated domain */ 2917 ret = iommu_group_alloc_default_domain(dev->bus, group, type); 2918 if (ret) 2919 goto out; 2920 2921 ret = iommu_create_device_direct_mappings(group, dev); 2922 if (ret) 2923 goto free_new_domain; 2924 2925 ret = __iommu_attach_device(group->default_domain, dev); 2926 if (ret) 2927 goto free_new_domain; 2928 2929 group->domain = group->default_domain; 2930 2931 /* 2932 * Release the mutex here because ops->probe_finalize() call-back of 2933 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 2934 * in turn might call back into IOMMU core code, where it tries to take 2935 * group->mutex, resulting in a deadlock. 2936 */ 2937 mutex_unlock(&group->mutex); 2938 2939 /* Make sure dma_ops is appropriately set */ 2940 iommu_group_do_probe_finalize(dev, group->default_domain); 2941 iommu_domain_free(prev_dom); 2942 return 0; 2943 2944 free_new_domain: 2945 iommu_domain_free(group->default_domain); 2946 group->default_domain = prev_dom; 2947 group->domain = prev_dom; 2948 2949 out: 2950 mutex_unlock(&group->mutex); 2951 2952 return ret; 2953 } 2954 2955 /* 2956 * Changing the default domain through sysfs requires users to unbind the 2957 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 2958 * transition. Return failure if this condition isn't met. 2959 * 2960 * We need to consider the race between this and the device release path. 2961 * device_lock(dev) is used here to guarantee that the device release path 2962 * will not be entered at the same time.
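 *
 * For example (an illustrative sketch; the group number is hypothetical),
 * an administrator could switch group 7 to lazy TLB invalidation with:
 *
 *	# echo DMA-FQ > /sys/kernel/iommu_groups/7/type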
2963 */ 2964 static ssize_t iommu_group_store_type(struct iommu_group *group, 2965 const char *buf, size_t count) 2966 { 2967 struct group_device *grp_dev; 2968 struct device *dev; 2969 int ret, req_type; 2970 2971 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2972 return -EACCES; 2973 2974 if (WARN_ON(!group) || !group->default_domain) 2975 return -EINVAL; 2976 2977 if (sysfs_streq(buf, "identity")) 2978 req_type = IOMMU_DOMAIN_IDENTITY; 2979 else if (sysfs_streq(buf, "DMA")) 2980 req_type = IOMMU_DOMAIN_DMA; 2981 else if (sysfs_streq(buf, "DMA-FQ")) 2982 req_type = IOMMU_DOMAIN_DMA_FQ; 2983 else if (sysfs_streq(buf, "auto")) 2984 req_type = 0; 2985 else 2986 return -EINVAL; 2987 2988 /* 2989 * Lock/Unlock the group mutex here before the device lock to 2990 * 1. Make sure that the iommu group has only one device (this is a 2991 * prerequisite for step 2) 2992 * 2. Get the struct device which is needed to lock the device 2993 */ 2994 mutex_lock(&group->mutex); 2995 if (iommu_group_device_count(group) != 1) { 2996 mutex_unlock(&group->mutex); 2997 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 2998 return -EINVAL; 2999 } 3000 3001 /* Since group has only one device */ 3002 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3003 dev = grp_dev->dev; 3004 get_device(dev); 3005 3006 /* 3007 * Don't hold the group mutex because taking the group mutex first and 3008 * then the device lock could potentially cause a deadlock as below. Assume 3009 * two threads T1 and T2. T1 is trying to change the default domain of an 3010 * iommu group and T2 is trying to hot unplug a device or release a VF [1] 3011 * of a PCIe device which is in the same iommu group. T1 takes the group 3012 * mutex and before it could take the device lock assume T2 has taken the 3013 * device lock and is yet to take the group mutex. Now, both threads will 3014 * be waiting for the other thread to release its lock. The lock order 3015 * below was suggested to avoid this: 3016 * device_lock(dev); 3017 * mutex_lock(&group->mutex); 3018 * iommu_change_dev_def_domain(); 3019 * mutex_unlock(&group->mutex); 3020 * device_unlock(dev); 3021 * 3022 * [1] Typical device release path 3023 * device_lock() from device/driver core code 3024 * -> bus_notifier() 3025 * -> iommu_bus_notifier() 3026 * -> iommu_release_device() 3027 * -> ops->release_device() vendor driver calls back iommu core code 3028 * -> mutex_lock() from iommu core code 3029 */ 3030 mutex_unlock(&group->mutex); 3031 3032 /* Check if the device in the group still has a driver bound to it */ 3033 device_lock(dev); 3034 if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ && 3035 group->default_domain->type == IOMMU_DOMAIN_DMA)) { 3036 pr_err_ratelimited("Device is still bound to driver\n"); 3037 ret = -EBUSY; 3038 goto out; 3039 } 3040 3041 ret = iommu_change_dev_def_domain(group, dev, req_type); 3042 ret = ret ?: count; 3043 3044 out: 3045 device_unlock(dev); 3046 put_device(dev); 3047 3048 return ret; 3049 } 3050 3051 static bool iommu_is_default_domain(struct iommu_group *group) 3052 { 3053 if (group->domain == group->default_domain) 3054 return true; 3055 3056 /* 3057 * If the default domain was set to identity and it is still an identity 3058 * domain then we consider this a pass. This happens because of 3059 * amd_iommu_init_device() replacing the default identity domain with an 3060 * identity domain that has a different configuration for AMDGPU.
3061 */ 3062 if (group->default_domain && 3063 group->default_domain->type == IOMMU_DOMAIN_IDENTITY && 3064 group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) 3065 return true; 3066 return false; 3067 } 3068 3069 /** 3070 * iommu_device_use_default_domain() - Device driver wants to handle device 3071 * DMA through the kernel DMA API. 3072 * @dev: The device. 3073 * 3074 * The device driver about to bind @dev wants to do DMA through the kernel 3075 * DMA API. Return 0 if it is allowed, otherwise an error. 3076 */ 3077 int iommu_device_use_default_domain(struct device *dev) 3078 { 3079 struct iommu_group *group = iommu_group_get(dev); 3080 int ret = 0; 3081 3082 if (!group) 3083 return 0; 3084 3085 mutex_lock(&group->mutex); 3086 if (group->owner_cnt) { 3087 if (group->owner || !iommu_is_default_domain(group) || 3088 !xa_empty(&group->pasid_array)) { 3089 ret = -EBUSY; 3090 goto unlock_out; 3091 } 3092 } 3093 3094 group->owner_cnt++; 3095 3096 unlock_out: 3097 mutex_unlock(&group->mutex); 3098 iommu_group_put(group); 3099 3100 return ret; 3101 } 3102 3103 /** 3104 * iommu_device_unuse_default_domain() - Device driver stops handling device 3105 * DMA through the kernel DMA API. 3106 * @dev: The device. 3107 * 3108 * The device driver doesn't want to do DMA through the kernel DMA API 3109 * anymore. It must be called after iommu_device_use_default_domain(). 3110 */ 3111 void iommu_device_unuse_default_domain(struct device *dev) 3112 { 3113 struct iommu_group *group = iommu_group_get(dev); 3114 3115 if (!group) 3116 return; 3117 3118 mutex_lock(&group->mutex); 3119 if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array))) 3120 group->owner_cnt--; 3121 3122 mutex_unlock(&group->mutex); 3123 iommu_group_put(group); 3124 } 3125 3126 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3127 { 3128 struct group_device *dev = 3129 list_first_entry(&group->devices, struct group_device, list); 3130 3131 if (group->blocking_domain) 3132 return 0; 3133 3134 group->blocking_domain = 3135 __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED); 3136 if (!group->blocking_domain) { 3137 /* 3138 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED, 3139 * create an empty domain instead. 3140 */ 3141 group->blocking_domain = __iommu_domain_alloc( 3142 dev->dev->bus, IOMMU_DOMAIN_UNMANAGED); 3143 if (!group->blocking_domain) 3144 return -EINVAL; 3145 } 3146 return 0; 3147 } 3148 3149 static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner) 3150 { 3151 int ret; 3152 3153 if ((group->domain && group->domain != group->default_domain) || 3154 !xa_empty(&group->pasid_array)) 3155 return -EBUSY; 3156 3157 ret = __iommu_group_alloc_blocking_domain(group); 3158 if (ret) 3159 return ret; 3160 ret = __iommu_group_set_domain(group, group->blocking_domain); 3161 if (ret) 3162 return ret; 3163 3164 group->owner = owner; 3165 group->owner_cnt++; 3166 return 0; 3167 } 3168 3169 /** 3170 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3171 * @group: The group. 3172 * @owner: Caller specified pointer. Used for exclusive ownership. 3173 * 3174 * This is to support backward compatibility for vfio, which manages DMA 3175 * ownership at the iommu_group level. New code should not call this 3176 * interface. Only a single owner may exist for a group.
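 *
 * A hedged usage sketch ("my_cookie" is a hypothetical owner token, e.g. a
 * pointer to the claiming driver's private data):
 *
 *	if (iommu_group_claim_dma_owner(group, my_cookie))
 *		return -EBUSY;
 *	... attach an UNMANAGED domain and run user-controlled DMA ...
 *	iommu_group_release_dma_owner(group);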
3177 */ 3178 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3179 { 3180 int ret = 0; 3181 3182 if (WARN_ON(!owner)) 3183 return -EINVAL; 3184 3185 mutex_lock(&group->mutex); 3186 if (group->owner_cnt) { 3187 ret = -EPERM; 3188 goto unlock_out; 3189 } 3190 3191 ret = __iommu_take_dma_ownership(group, owner); 3192 unlock_out: 3193 mutex_unlock(&group->mutex); 3194 3195 return ret; 3196 } 3197 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3198 3199 /** 3200 * iommu_device_claim_dma_owner() - Set DMA ownership of a device 3201 * @dev: The device. 3202 * @owner: Caller specified pointer. Used for exclusive ownership. 3203 * 3204 * Claim the DMA ownership of a device. Multiple devices in the same group may 3205 * concurrently claim ownership if they present the same owner value. Returns 0 3206 * on success and error code on failure. 3207 */ 3208 int iommu_device_claim_dma_owner(struct device *dev, void *owner) 3209 { 3210 struct iommu_group *group = iommu_group_get(dev); 3211 int ret = 0; 3212 3213 if (!group) 3214 return -ENODEV; 3215 if (WARN_ON(!owner)) { 3216 ret = -EINVAL; 3217 goto out_put; 3218 } 3219 mutex_lock(&group->mutex); 3220 if (group->owner_cnt) { 3221 if (group->owner != owner) { 3222 ret = -EPERM; 3223 goto unlock_out; 3224 } 3225 group->owner_cnt++; 3226 goto unlock_out; 3227 } 3228 ret = __iommu_take_dma_ownership(group, owner); 3229 unlock_out: 3230 mutex_unlock(&group->mutex); 3231 out_put: 3232 iommu_group_put(group); 3233 3234 return ret; 3235 } 3236 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner); 3237 static void __iommu_release_dma_ownership(struct iommu_group *group) 3238 { 3239 int ret; 3240 3241 if (WARN_ON(!group->owner_cnt || !group->owner || 3242 !xa_empty(&group->pasid_array))) 3243 return; 3244 3245 group->owner_cnt = 0; 3246 group->owner = NULL; 3247 ret = __iommu_group_set_domain(group, group->default_domain); 3248 WARN(ret, "iommu driver failed to attach the default domain"); 3249 } 3250 3251 /** 3252 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3253 * @group: The group. 3254 * 3255 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3256 */ 3257 void iommu_group_release_dma_owner(struct iommu_group *group) 3258 { 3259 mutex_lock(&group->mutex); 3260 __iommu_release_dma_ownership(group); 3261 mutex_unlock(&group->mutex); 3262 } 3263 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3264 3265 /** 3266 * iommu_device_release_dma_owner() - Release DMA ownership of a device 3267 * @dev: The device. 3268 * 3269 * Release the DMA ownership claimed by iommu_device_claim_dma_owner(). 3270 */ 3271 void iommu_device_release_dma_owner(struct device *dev) 3272 { 3273 struct iommu_group *group = iommu_group_get(dev); 3274 3275 mutex_lock(&group->mutex); 3276 if (group->owner_cnt > 1) 3277 group->owner_cnt--; 3278 else 3279 __iommu_release_dma_ownership(group); 3280 mutex_unlock(&group->mutex); 3281 iommu_group_put(group); 3282 } 3283 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner); 3284 3285 /** 3286 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3287 * @group: The group. 3288 * 3289 * This provides status query on a given group. It is racy and only for 3290 * non-binding status reporting.
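 *
 * e.g. (an illustrative sketch) a caller may log the result, but must not
 * base locking or lifetime decisions on it:
 *
 *	pr_debug("group %d owner claimed: %d\n", iommu_group_id(group),
 *		 iommu_group_dma_owner_claimed(group));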
3291 */ 3292 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3293 { 3294 unsigned int user; 3295 3296 mutex_lock(&group->mutex); 3297 user = group->owner_cnt; 3298 mutex_unlock(&group->mutex); 3299 3300 return user; 3301 } 3302 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3303 3304 static int __iommu_set_group_pasid(struct iommu_domain *domain, 3305 struct iommu_group *group, ioasid_t pasid) 3306 { 3307 struct group_device *device; 3308 int ret = 0; 3309 3310 list_for_each_entry(device, &group->devices, list) { 3311 ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); 3312 if (ret) 3313 break; 3314 } 3315 3316 return ret; 3317 } 3318 3319 static void __iommu_remove_group_pasid(struct iommu_group *group, 3320 ioasid_t pasid) 3321 { 3322 struct group_device *device; 3323 const struct iommu_ops *ops; 3324 3325 list_for_each_entry(device, &group->devices, list) { 3326 ops = dev_iommu_ops(device->dev); 3327 ops->remove_dev_pasid(device->dev, pasid); 3328 } 3329 } 3330 3331 /* 3332 * iommu_attach_device_pasid() - Attach a domain to pasid of device 3333 * @domain: the iommu domain. 3334 * @dev: the attached device. 3335 * @pasid: the pasid of the device. 3336 * 3337 * Return: 0 on success, or an error. 3338 */ 3339 int iommu_attach_device_pasid(struct iommu_domain *domain, 3340 struct device *dev, ioasid_t pasid) 3341 { 3342 struct iommu_group *group; 3343 void *curr; 3344 int ret; 3345 3346 if (!domain->ops->set_dev_pasid) 3347 return -EOPNOTSUPP; 3348 3349 group = iommu_group_get(dev); 3350 if (!group) 3351 return -ENODEV; 3352 3353 mutex_lock(&group->mutex); 3354 curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); 3355 if (curr) { 3356 ret = xa_err(curr) ? : -EBUSY; 3357 goto out_unlock; 3358 } 3359 3360 ret = __iommu_set_group_pasid(domain, group, pasid); 3361 if (ret) { 3362 __iommu_remove_group_pasid(group, pasid); 3363 xa_erase(&group->pasid_array, pasid); 3364 } 3365 out_unlock: 3366 mutex_unlock(&group->mutex); 3367 iommu_group_put(group); 3368 3369 return ret; 3370 } 3371 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid); 3372 3373 /* 3374 * iommu_detach_device_pasid() - Detach the domain from pasid of device 3375 * @domain: the iommu domain. 3376 * @dev: the attached device. 3377 * @pasid: the pasid of the device. 3378 * 3379 * The @domain must have been attached to @pasid of @dev with 3380 * iommu_attach_device_pasid(). 3381 */ 3382 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, 3383 ioasid_t pasid) 3384 { 3385 struct iommu_group *group = iommu_group_get(dev); 3386 3387 mutex_lock(&group->mutex); 3388 __iommu_remove_group_pasid(group, pasid); 3389 WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); 3390 mutex_unlock(&group->mutex); 3391 3392 iommu_group_put(group); 3393 } 3394 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3395 3396 /* 3397 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3398 * @dev: the queried device 3399 * @pasid: the pasid of the device 3400 * @type: matched domain type, 0 for any match 3401 * 3402 * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3403 * domain attached to pasid of a device. Callers must hold a lock around this 3404 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3405 * this type is being manipulated. This API does not internally resolve races 3406 * with attach/detach. 3407 * 3408 * Return: attached domain on success, NULL otherwise.
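 *
 * A minimal lookup sketch (illustrative; assumes the caller holds the same
 * external lock it uses around its attach/detach paths):
 *
 *	domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
 *	if (IS_ERR_OR_NULL(domain))
 *		return -ENODEV;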
3409 */ 3410 struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3411 ioasid_t pasid, 3412 unsigned int type) 3413 { 3414 struct iommu_domain *domain; 3415 struct iommu_group *group; 3416 3417 group = iommu_group_get(dev); 3418 if (!group) 3419 return NULL; 3420 3421 xa_lock(&group->pasid_array); 3422 domain = xa_load(&group->pasid_array, pasid); 3423 if (type && domain && domain->type != type) 3424 domain = ERR_PTR(-EBUSY); 3425 xa_unlock(&group->pasid_array); 3426 iommu_group_put(group); 3427 3428 return domain; 3429 } 3430 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3431 3432 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, 3433 struct mm_struct *mm) 3434 { 3435 const struct iommu_ops *ops = dev_iommu_ops(dev); 3436 struct iommu_domain *domain; 3437 3438 domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); 3439 if (!domain) 3440 return NULL; 3441 3442 domain->type = IOMMU_DOMAIN_SVA; 3443 mmgrab(mm); 3444 domain->mm = mm; 3445 domain->iopf_handler = iommu_sva_handle_iopf; 3446 domain->fault_data = mm; 3447 3448 return domain; 3449 } 3450
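/*
 * Usage sketch for iommu_sva_domain_alloc() above (illustrative only;
 * "pasid" is assumed to have been allocated by the caller, e.g. through the
 * SVA framework):
 *
 *	struct iommu_domain *domain = iommu_sva_domain_alloc(dev, current->mm);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device_pasid(domain, dev, pasid)) {
 *		iommu_domain_free(domain);
 *		return -ENODEV;
 *	}
 */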