// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>

#include "dma-iommu.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				"(set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{
	if (dev->iommu && dev->iommu->iommu_dev == data)
		iommu_release_device(dev);

	return 0;
}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops: IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{
	int err = 0;

	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;
	/*
	 * Temporarily enforce global restriction to a single driver. This was
	 * already the de-facto behaviour, since any possible combination of
	 * existing drivers would compete for at least the PCI or platform bus.
	 */
	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
		return -EBUSY;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = dev_fwnode(hwdev);

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
	if (err)
		iommu_device_unregister(iommu);
	return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
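/*
 * Illustrative sketch (added; not part of the original file): a hypothetical
 * driver "mydrv" would typically call iommu_device_register() at the end of
 * its probe routine, after sysfs registration. The "mydrv" names below are
 * made up; iommu_device_sysfs_add()/iommu_device_register() are real APIs.
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct mydrv *drv = platform_get_drvdata(pdev);
 *		int ret;
 *
 *		ret = iommu_device_sysfs_add(&drv->iommu, &pdev->dev,
 *					     NULL, "mydrv");
 *		if (ret)
 *			return ret;
 *
 *		ret = iommu_device_register(&drv->iommu, &mydrv_ops,
 *					    &pdev->dev);
 *		if (ret)
 *			iommu_device_sysfs_remove(&drv->iommu);
 *		return ret;
 *	}
 */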
void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	mutex_lock(&group->mutex);
	iommu_alloc_default_domain(group, dev);

	/*
	 * If device joined an existing group which has been claimed, don't
	 * attach the default domain.
	 */
	if (group->default_domain && !group->owner) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			mutex_unlock(&group->mutex);
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}
static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}
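/*
 * Example (added for illustration, not in the original file): booting with
 * "iommu.passthrough=0 iommu.strict=1" forces translated default domains
 * with strict TLB invalidation, while "iommu.passthrough=1" selects
 * IOMMU_DOMAIN_IDENTITY as the default domain type. These are the two
 * early_params parsed above.
 */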
static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
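/*
 * Worked example (added for illustration): inserting a "direct" region
 * [0x1000, 0x2fff] into a list already holding a "direct" region
 * [0x2000, 0x4fff] sorts the new element first and then merges the two
 * overlapping segments into a single direct region [0x1000, 0x4fff].
 * A "reserved" region covering the same range would be left untouched,
 * since only segments of the same type are merged.
 */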
static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ\n";
			break;
		}
	}
	mutex_unlock(&group->mutex);
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};
/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
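/*
 * Illustrative sketch (added; not from the original source): a driver's
 * ->device_group() callback typically either reuses a group found via the
 * topology helpers defined later in this file or allocates a fresh one.
 * The "mydrv" name below is hypothetical:
 *
 *	static struct iommu_group *mydrv_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 */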
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
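/*
 * Added explanatory note (not in the original file): the function below
 * walks each IOMMU_RESV_DIRECT[_RELAXABLE] window page by page and batches
 * runs of contiguous unmapped pages into a single iommu_map() call. For
 * example, with a 4K page size and a window [0x8000, 0xafff] in which
 * 0x9000 is already mapped, it issues two identity mappings: one for
 * [0x8000, 0x8fff] and one for [0xa000, 0xafff].
 */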
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || !iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot);
				if (ret)
					goto out;
				map_size = 0;
			}
		}
	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->is_attach_deferred)
		return ops->is_attach_deferred(dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
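/*
 * Illustrative sketch (added): an iommu_group_for_each_dev() callback
 * receives each device plus the opaque data pointer; returning non-zero
 * stops the iteration. The helper below is hypothetical:
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int ndev = 0;
 *	iommu_group_for_each_dev(group, &ndev, count_dev);
 */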
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the
 * following response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
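/*
 * Illustrative sketch (added): a consumer of recoverable faults might
 * register a handler like the hypothetical one below, passing its own
 * struct device as @data so the handler can complete page requests via
 * iommu_page_response(). Real handlers also need to honour the PASID
 * fields; this is only an outline.
 *
 *	static int mydrv_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp = {
 *			.version = IOMMU_PAGE_RESP_VERSION_1,
 *			.grpid	 = fault->prm.grpid,
 *			.code	 = IOMMU_PAGE_RESP_SUCCESS,
 *		};
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *
 *		return iommu_page_response(dev, &resp);
 *	}
 *
 *	iommu_register_device_fault_handler(dev, mydrv_fault_handler, dev);
 */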
/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search space
 * is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}
struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;
	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}
static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev  = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	int ret = 0;

	if (!iommu_is_attach_deferred(dev))
		ret = __iommu_attach_device(domain, dev);

	return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		/* Remove item from the list */
		list_del_init(&group->entry);

		mutex_lock(&group->mutex);

		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);

		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}

		iommu_group_create_direct_mappings(group);

		ret = __iommu_group_dma_attach(group);

		mutex_unlock(&group->mutex);

		if (ret)
			break;

		__iommu_group_dma_finalize(group);
	}

	return ret;
}

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
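/*
 * Illustrative sketch (added): callers typically gate an optional feature
 * on a capability check, e.g.:
 *
 *	if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		... take the coherent DMA path ...
 *	else
 *		... fall back ...
 */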
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	iommu_put_dma_cookie(domain);
	domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
	struct iommu_domain *new_domain;
	int ret;

	if (group->owner)
		new_domain = group->blocking_domain;
	else
		new_domain = group->default_domain;

	ret = __iommu_group_set_domain(group, new_domain);
	WARN(ret, "iommu driver failed to attach the default/blocking domain");
}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
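/*
 * Illustrative sketch (added): the usual lifecycle of an unmanaged domain
 * pairs iommu_domain_alloc()/iommu_attach_device() with matching detach
 * and free calls. Error handling is trimmed; iommu_map()/iommu_unmap()
 * are declared in <linux/iommu.h>.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
 *
 *	if (dom && !iommu_attach_device(dom, dev)) {
 *		iommu_map(dom, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, iova, SZ_4K);
 *		iommu_detach_device(dom, dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 */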
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	if (iommu_is_attach_deferred(dev))
		return __iommu_attach_device(domain, dev);

	return 0;
}

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (iommu_is_attach_deferred(dev))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (WARN_ON(domain != group->domain) ||
	    WARN_ON(iommu_group_device_count(group) != 1))
		goto out_unlock;
	__iommu_group_set_core_domain(group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->domain && group->domain != group->default_domain &&
	    group->domain != group->blocking_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	int ret;

	if (group->domain == new_domain)
		return 0;

	/*
	 * New drivers should support default domains and so the detach_dev() op
	 * will never be called. Otherwise the NULL domain represents some
	 * platform specific behavior.
	 */
	if (!new_domain) {
		if (WARN_ON(!group->domain->ops->detach_dev))
			return -EINVAL;
		__iommu_group_for_each_dev(group, group->domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return 0;
	}

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain. This switch does not have to be atomic and DMA can be
	 * discarded during the transition. DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 *
	 * Note that this is called in error unwind paths, attaching to a
	 * domain that has already been attached cannot fail.
	 */
	ret = __iommu_group_for_each_dev(group, new_domain,
					 iommu_group_do_attach_device);
	if (ret)
		return ret;
	group->domain = new_domain;
	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
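/*
 * Worked example (added) for iommu_pgsize() below: with a pgsize_bitmap of
 * SZ_4K | SZ_2M, iova = paddr = 0x200000 and size = 0x400000, both the
 * address alignment and the size permit the 2M page size, so pgsize = SZ_2M
 * and, when @count is supplied, count = 2. With iova = paddr = 0x201000 the
 * alignment only permits 4K pages.
 */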
static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	int ret;

	if (group->domain == new_domain)
		return 0;

	/*
	 * New drivers should support default domains, so the detach_dev() op
	 * will never be called.  Otherwise the NULL domain represents some
	 * platform-specific behavior.
	 */
	if (!new_domain) {
		if (WARN_ON(!group->domain->ops->detach_dev))
			return -EINVAL;
		__iommu_group_for_each_dev(group, group->domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return 0;
	}

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain.  This switch does not have to be atomic and DMA can be
	 * discarded during the transition.  DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 *
	 * Note that this is called in error unwind paths: attaching to a
	 * domain that has already been attached cannot fail.
	 */
	ret = __iommu_group_for_each_dev(group, new_domain,
					 iommu_group_do_attach_device);
	if (ret)
		return ret;
	group->domain = new_domain;
	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
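	 *
	 * Hypothetical worked example, assuming 4K and 2M page sizes: for
	 * iova = paddr = 0x1ff000 and size = 0x202000, the biggest usable
	 * page size is 4K and offset = 0x1000.  Since offset + 2M still
	 * fits within size, *count is clamped to a single 4K page; the
	 * caller's next call is then 2M-aligned and can use 2M mappings.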
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot,
			     gfp_t gfp, size_t *mapped)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;
	int ret;

	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
		 iova, &paddr, pgsize, count);

	if (ops->map_pages) {
		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
				     gfp, mapped);
	} else {
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
		*mapped = ret ? 0 : pgsize;
	}

	return ret;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(!(ops->map || ops->map_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t mapped = 0;

		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
					&mapped);
		/*
		 * Some pages may have been mapped, even if an error occurred,
		 * so we should account for those so they can be unmapped.
		 */
		size -= mapped;

		if (ret)
			break;

		iova += mapped;
		paddr += mapped;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	int ret;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
	if (ret == 0 && ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	might_sleep();
	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);

static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
	return ops->unmap_pages ?
	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
	       ops->unmap(domain, iova, pgsize, iotlb_gather);
}

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware.
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
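	 * Note that a single __iommu_unmap_pages() call below may cover
	 * more than one page at a time when the driver implements the
	 * unmap_pages() callback.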
	 */
	while (unmapped < size) {
		unmapped_page = __iommu_unmap_pages(domain, iova,
						    size - unmapped,
						    iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents,
			      int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (sg_is_dma_bus_address(sg))
			goto next;

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

next:
		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return ret;
}

ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		     struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}

/**
 * report_iommu_fault() - report an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users interested in
 * such events to know about them.
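 *
 * A minimal sketch of a driver-side call (the hardware fault decoding
 * around it is entirely driver-specific):
 *
 *	ret = report_iommu_fault(domain, dev, iova, IOMMU_FAULT_READ);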
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - restarting the faulting device, if required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * If upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_enable_nesting(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->enable_nesting)
		return -EINVAL;
	return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

/**
 * iommu_put_resv_regions - release reserved regions
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * This releases a reserved region list acquired by iommu_get_resv_regions().
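 *
 * A typical caller pairs the two around a temporary list (minimal
 * sketch; the actual walk of the regions is elided):
 *
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	... inspect the regions on the list ...
 *	iommu_put_resv_regions(dev, &resv_regions);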
 */
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list) {
		if (entry->free)
			entry->free(dev, entry);
		else
			kfree(entry);
	}
}
EXPORT_SYMBOL(iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type,
						  gfp_t gfp)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), gfp);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

/*
 * Per-device IOMMU features.
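 *
 * A minimal sketch of how a driver that wants SVA might use these
 * (feature-availability checks and error handling elided):
 *
 *	if (!iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		... use shared virtual addressing ...
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	}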
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

/*
 * Changes the default domain of an iommu group that has *only* one device.
 *
 * @group: The group for which the default domain should be changed
 * @prev_dev: The device in the group (this is used to make sure that the
 *	device hasn't changed after the caller has called this function)
 * @type: The type of the new default domain that gets associated with the
 *	group
 *
 * Returns 0 on success and error code on failure.
 *
 * Note:
 * 1. Presently, this function is called only when the user requests to change
 *    the group's default domain type through
 *    /sys/kernel/iommu_groups/<grp_id>/type.  Please take a closer look if
 *    intended to be used for other purposes.
 */
static int iommu_change_dev_def_domain(struct iommu_group *group,
				       struct device *prev_dev, int type)
{
	struct iommu_domain *prev_dom;
	struct group_device *grp_dev;
	int ret, dev_def_dom;
	struct device *dev;

	mutex_lock(&group->mutex);

	if (group->default_domain != group->domain) {
		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * The iommu group wasn't locked while acquiring the device lock in
	 * iommu_group_store_type().  So, make sure that the device count
	 * hasn't changed while acquiring the device lock.
	 *
	 * Changing the default domain of an iommu group with two or more
	 * devices isn't supported because of a potential deadlock.  Consider
	 * the following scenario: T1 is trying to acquire the device locks of
	 * all the devices in the group, and before it can acquire all of
	 * them, another thread T2 (from a different sub-system and use case)
	 * may have already acquired some of the device locks and might be
	 * waiting for T1 to release the other device locks.
	 */
	if (iommu_group_device_count(group) != 1) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;

	if (prev_dev != dev) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
		ret = -EBUSY;
		goto out;
	}

	prev_dom = group->default_domain;
	if (!prev_dom) {
		ret = -EINVAL;
		goto out;
	}

	dev_def_dom = iommu_get_def_domain_type(dev);
	if (!type) {
		/*
		 * If the user hasn't requested any specific type of domain and
		 * if the device supports both the domains, then default to the
		 * domain the device was booted with
		 */
		type = dev_def_dom ? : iommu_def_domain_type;
	} else if (dev_def_dom && type != dev_def_dom) {
		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
				    iommu_domain_type_str(type));
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Switch to a new domain only if the requested domain type is different
	 * from the existing default domain type
	 */
	if (prev_dom->type == type) {
		ret = 0;
		goto out;
	}

	/* We can bring up a flush queue without tearing down the domain */
	if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(prev_dom);
		if (!ret)
			prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
		goto out;
	}

	/* Sets group->default_domain to the newly allocated domain */
	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
	if (ret)
		goto out;

	ret = iommu_create_device_direct_mappings(group, dev);
	if (ret)
		goto free_new_domain;

	ret = __iommu_attach_device(group->default_domain, dev);
	if (ret)
		goto free_new_domain;

	group->domain = group->default_domain;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
	 * in turn might call back into IOMMU core code, where it tries to take
	 * group->mutex, resulting in a deadlock.
	 */
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	iommu_group_do_probe_finalize(dev, group->default_domain);
	iommu_domain_free(prev_dom);
	return 0;

free_new_domain:
	iommu_domain_free(group->default_domain);
	group->default_domain = prev_dom;
	group->domain = prev_dom;

out:
	mutex_unlock(&group->mutex);

	return ret;
}

/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition.  Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * device_lock(dev) is used here to guarantee that the device release path
 * will not be entered at the same time.
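 *
 * For example (sketch), after unbinding the device's driver an
 * administrator can request a lazy-invalidation DMA domain with:
 *
 *	echo DMA-FQ > /sys/kernel/iommu_groups/<grp_id>/type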
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *grp_dev;
	struct device *dev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group) || !group->default_domain)
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	/*
	 * Lock/Unlock the group mutex here before the device lock to
	 * 1. Make sure that the iommu group has only one device (this is a
	 *    prerequisite for step 2)
	 * 2. Get struct *dev which is needed to lock the device
	 */
	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		mutex_unlock(&group->mutex);
		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
		return -EINVAL;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;
	get_device(dev);

	/*
	 * Don't hold the group mutex because taking the group mutex first and
	 * then the device lock could potentially cause a deadlock as below.
	 * Assume two threads, T1 and T2.  T1 is trying to change the default
	 * domain of an iommu group and T2 is trying to hot unplug a device or
	 * release [1] a VF of a PCIe device which is in the same iommu group.
	 * T1 takes the group mutex and, before it can take the device lock,
	 * T2 may have taken the device lock while not yet holding the group
	 * mutex.  Both threads would then wait for the other to release its
	 * lock, hence the lock order below:
	 *	device_lock(dev);
	 *	mutex_lock(&group->mutex);
	 *	iommu_change_dev_def_domain();
	 *	mutex_unlock(&group->mutex);
	 *	device_unlock(dev);
	 *
	 * [1] Typical device release path
	 * device_lock() from device/driver core code
	 *  -> bus_notifier()
	 *   -> iommu_bus_notifier()
	 *    -> iommu_release_device()
	 *     -> ops->release_device() vendor driver calls back iommu core code
	 *      -> mutex_lock() from iommu core code
	 */
	mutex_unlock(&group->mutex);

	/* Check if the device in the group still has a driver bound to it */
	device_lock(dev);
	if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA)) {
		pr_err_ratelimited("Device is still bound to driver\n");
		ret = -EBUSY;
		goto out;
	}

	ret = iommu_change_dev_def_domain(group, dev, req_type);
	ret = ret ?: count;

out:
	device_unlock(dev);
	put_device(dev);

	return ret;
}

static bool iommu_is_default_domain(struct iommu_group *group)
{
	if (group->domain == group->default_domain)
		return true;

	/*
	 * If the default domain was set to identity and it is still an identity
	 * domain then we consider this a pass.  This happens because of
	 * amd_iommu_init_device() replacing the default identity domain with an
	 * identity domain that has a different configuration for AMDGPU.
	 */
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
		return true;
	return false;
}

/**
 * iommu_device_use_default_domain() - Device driver wants to handle device
 *                                     DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver about to bind @dev wants to do DMA through the kernel
 * DMA API. Return 0 if it is allowed, otherwise an error.
 */
int iommu_device_use_default_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret = 0;

	if (!group)
		return 0;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		if (group->owner || !iommu_is_default_domain(group) ||
		    !xa_empty(&group->pasid_array)) {
			ret = -EBUSY;
			goto unlock_out;
		}
	}

	group->owner_cnt++;

unlock_out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

/**
 * iommu_device_unuse_default_domain() - Device driver stops handling device
 *                                       DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver doesn't want to do DMA through the kernel DMA API
 * anymore. It must be called after iommu_device_use_default_domain().
 */
void iommu_device_unuse_default_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
		group->owner_cnt--;

	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}

static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
	struct group_device *dev =
		list_first_entry(&group->devices, struct group_device, list);

	if (group->blocking_domain)
		return 0;

	group->blocking_domain =
		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
	if (!group->blocking_domain) {
		/*
		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED,
		 * create an empty domain instead.
		 */
		group->blocking_domain = __iommu_domain_alloc(
			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
		if (!group->blocking_domain)
			return -EINVAL;
	}
	return 0;
}

/**
 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
 * @group: The group.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * This is to support backward compatibility for vfio, which manages DMA
 * ownership at the iommu_group level. New code should not use this
 * interface.
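 *
 * A minimal sketch of the expected bracketing (the owner cookie is any
 * unique pointer chosen by the caller):
 *
 *	ret = iommu_group_claim_dma_owner(group, owner_cookie);
 *	... user-controlled DMA via a caller-attached domain ...
 *	iommu_group_release_dma_owner(group);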
 */
int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	int ret = 0;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		ret = -EPERM;
		goto unlock_out;
	} else {
		if ((group->domain && group->domain != group->default_domain) ||
		    !xa_empty(&group->pasid_array)) {
			ret = -EBUSY;
			goto unlock_out;
		}

		ret = __iommu_group_alloc_blocking_domain(group);
		if (ret)
			goto unlock_out;

		ret = __iommu_group_set_domain(group, group->blocking_domain);
		if (ret)
			goto unlock_out;
		group->owner = owner;
	}

	group->owner_cnt++;
unlock_out:
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);

/**
 * iommu_group_release_dma_owner() - Release DMA ownership of a group
 * @group: The group.
 *
 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
 */
void iommu_group_release_dma_owner(struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	if (WARN_ON(!group->owner_cnt || !group->owner ||
		    !xa_empty(&group->pasid_array)))
		goto unlock_out;

	group->owner_cnt = 0;
	group->owner = NULL;
	ret = __iommu_group_set_domain(group, group->default_domain);
	WARN(ret, "iommu driver failed to attach the default domain");

unlock_out:
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
 * iommu_group_dma_owner_claimed() - Query group DMA ownership status
 * @group: The group.
 *
 * This provides a status query on a given group. It is racy and only for
 * non-binding status reporting.
 */
bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	unsigned int user;

	mutex_lock(&group->mutex);
	user = group->owner_cnt;
	mutex_unlock(&group->mutex);

	return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);

static int __iommu_set_group_pasid(struct iommu_domain *domain,
				   struct iommu_group *group, ioasid_t pasid)
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
		if (ret)
			break;
	}

	return ret;
}

static void __iommu_remove_group_pasid(struct iommu_group *group,
				       ioasid_t pasid)
{
	struct group_device *device;
	const struct iommu_ops *ops;

	list_for_each_entry(device, &group->devices, list) {
		ops = dev_iommu_ops(device->dev);
		ops->remove_dev_pasid(device->dev, pasid);
	}
}

/*
 * iommu_attach_device_pasid() - Attach a domain to the PASID of a device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * Return: 0 on success, or an error.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid)
{
	struct iommu_group *group;
	void *curr;
	int ret;

	if (!domain->ops->set_dev_pasid)
		return -EOPNOTSUPP;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	mutex_lock(&group->mutex);
	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
	if (curr) {
		ret = xa_err(curr) ? : -EBUSY;
		goto out_unlock;
	}

	ret = __iommu_set_group_pasid(domain, group, pasid);
	if (ret) {
		__iommu_remove_group_pasid(group, pasid);
		xa_erase(&group->pasid_array, pasid);
	}
out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
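/*
 * Example flow (a minimal sketch): an SVA-capable driver allocates an
 * SVA domain for the current process and binds it to a PASID obtained
 * from the driver's own PASID allocator, unbinding it again when done.
 * Error handling is elided:
 *
 *	domain = iommu_sva_domain_alloc(dev, current->mm);
 *	ret = iommu_attach_device_pasid(domain, dev, pasid);
 *	...
 *	iommu_detach_device_pasid(domain, dev, pasid);
 *	iommu_domain_free(domain);
 */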
/*
 * iommu_detach_device_pasid() - Detach the domain from the PASID of a device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * The @domain must have been attached to @pasid of the @dev with
 * iommu_attach_device_pasid().
 */
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
			       ioasid_t pasid)
{
	struct iommu_group *group = iommu_group_get(dev);

	mutex_lock(&group->mutex);
	__iommu_remove_group_pasid(group, pasid);
	WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);

/*
 * iommu_get_domain_for_dev_pasid() - Retrieve the domain for @pasid of @dev
 * @dev: the queried device
 * @pasid: the pasid of the device
 * @type: matched domain type, 0 for any match
 *
 * This is a variant of iommu_get_domain_for_dev(). It returns the existing
 * domain attached to @pasid of a device. Callers must hold a lock around this
 * function, and around both iommu_attach/detach_device_pasid(), whenever a
 * domain of the given type is being manipulated. This API does not internally
 * resolve races with attach/detach.
 *
 * Return: attached domain on success, NULL otherwise.
 */
struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
						    ioasid_t pasid,
						    unsigned int type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	xa_lock(&group->pasid_array);
	domain = xa_load(&group->pasid_array, pasid);
	if (type && domain && domain->type != type)
		domain = ERR_PTR(-EBUSY);
	xa_unlock(&group->pasid_array);
	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;

	return domain;
}