// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>

#include "dma-iommu.h"

#include "iommu-sva.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				"(set via kernel command line)" : "");

	nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
		nb[i].notifier_call = iommu_bus_notifier;
		bus_register_notifier(iommu_buses[i], &nb[i]);
	}

	return 0;
}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{
	if (dev->iommu && dev->iommu->iommu_dev == data)
		iommu_release_device(dev);

	return 0;
}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops: IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{
	int err = 0;

	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;
	/*
	 * Temporarily enforce global restriction to a single driver. This was
	 * already the de-facto behaviour, since any possible combination of
	 * existing drivers would compete for at least the PCI or platform bus.
	 */
	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
		return -EBUSY;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = dev_fwnode(hwdev);

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
		iommu_buses[i]->iommu_ops = ops;
		err = bus_iommu_probe(iommu_buses[i]);
	}
	if (err)
		iommu_device_unregister(iommu);
	return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
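/*
 * Example (editorial sketch, not part of the kernel sources): a driver
 * would typically call iommu_device_register() at the end of its own probe
 * routine, once the hardware is ready. The names my_smmu and my_smmu_ops
 * below are made up for illustration:
 *
 *	static int my_smmu_probe(struct platform_device *pdev)
 *	{
 *		struct my_smmu *smmu = platform_get_drvdata(pdev);
 *
 *		return iommu_device_register(&smmu->iommu, &my_smmu_ops,
 *					     &pdev->dev);
 *	}
 */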
void iommu_device_unregister(struct iommu_device *iommu)
{
	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);

	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;
	if (param->fwspec) {
		fwnode_handle_put(param->fwspec->iommu_fwnode);
		kfree(param->fwspec);
	}
	kfree(param);
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
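/*
 * Editorial note: as a worked example of the helper above, a platform
 * device with a "pasid-num-bits" property of 10 yields 1 << 10 = 1024
 * PASIDs, which the final min_t() then clamps to whatever the IOMMU
 * instance itself supports (say, 512), so the smaller limit always wins.
 */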
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	static DEFINE_MUTEX(iommu_probe_device_lock);
	int ret;

	if (!ops)
		return -ENODEV;
	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	mutex_lock(&iommu_probe_device_lock);
	if (!dev_iommu_get(dev)) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;
	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}

	mutex_lock(&group->mutex);
	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	mutex_unlock(&iommu_probe_device_lock);
	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	if (ops->release_device)
		ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

err_unlock:
	mutex_unlock(&iommu_probe_device_lock);

	return ret;
}

static bool iommu_is_attach_deferred(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->is_attach_deferred)
		return ops->is_attach_deferred(dev);

	return false;
}

static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	lockdep_assert_held(&dev->iommu_group->mutex);

	if (iommu_is_attach_deferred(dev)) {
		dev->iommu->attach_deferred = 1;
		return 0;
	}

	return __iommu_attach_device(domain, dev);
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	mutex_lock(&group->mutex);
	iommu_alloc_default_domain(group, dev);

	/*
	 * If device joined an existing group which has been claimed, don't
	 * attach the default domain.
	 */
	if (group->default_domain && !group->owner) {
		ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
		if (ret) {
			mutex_unlock(&group->mutex);
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops = dev_iommu_ops(dev);
	if (ops->release_device)
		ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}
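/*
 * Editorial note: as a worked example of the merge logic above, inserting
 * a direct region [0x1800, 0x2fff] into a list that already holds a direct
 * region [0x1000, 0x1fff] first sorts the new element by start address and
 * then collapses the overlap into a single direct region [0x1000, 0x2fff].
 * Overlapping regions of *different* types are deliberately left separate.
 */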
static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!device->dev->iommu)
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ\n";
			break;
		}
	}
	mutex_unlock(&group->mutex);
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	if (group->blocking_domain)
		iommu_domain_free(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
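/*
 * Example (editorial sketch, illustrative only): a bus-specific
 * device_group callback usually tries to find an existing group to share
 * before falling back to iommu_group_alloc(). The lookup helper
 * my_find_shared_group() is made up; see fsl_mc_device_group() further
 * down for an in-tree instance of the same pattern:
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		struct iommu_group *group = my_find_shared_group(dev);
 *
 *		if (group)
 *			return group;
 *		return iommu_group_alloc();
 *	}
 */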
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || !iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}

	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = iommu_group_do_dma_first_attach(dev, group->domain);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
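/*
 * Example (editorial sketch, illustrative only): counting the devices in
 * a device's group with the reference and iteration helpers above. The
 * callback name count_one() is made up; a non-zero return from the
 * callback would stop the walk early:
 *
 *	static int count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *	int count = 0;
 *
 *	if (group) {
 *		iommu_group_for_each_dev(group, &count, count_one);
 *		iommu_group_put(group);
 *	}
 */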
/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
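/*
 * Example (editorial sketch, illustrative only): a device driver
 * registering a fault handler and later completing a recoverable page
 * request. The names my_handle_fault and mydev are made up:
 *
 *	static int my_handle_fault(struct iommu_fault *fault, void *data)
 *	{
 *		struct my_dev *mydev = data;
 *		...
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_handle_fault, mydev);
 *
 * Once an IOMMU_FAULT_PAGE_REQ fault has been serviced, the driver replies:
 *
 *	struct iommu_page_response resp = {
 *		.version = IOMMU_PAGE_RESP_VERSION_1,
 *		.grpid	 = fault->prm.grpid,
 *		.code	 = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	iommu_page_response(dev, &resp);
 */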
/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
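/*
 * Example (editorial sketch, illustrative only): an IOMMU driver
 * forwarding a last-in-group page request from its IRQ thread via
 * iommu_report_device_fault(). The field values are placeholders:
 *
 *	struct iommu_fault_event evt = {
 *		.fault.type = IOMMU_FAULT_PAGE_REQ,
 *		.fault.prm = {
 *			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
 *			.grpid = grpid,
 *			.perm  = IOMMU_FAULT_PERM_READ,
 *			.addr  = iova,
 *		},
 *	};
 *
 *	ret = iommu_report_device_fault(dev, &evt);
 */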
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;
	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}
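/*
 * Example (editorial sketch, illustrative only): a driver can steer
 * individual devices to a fixed default domain type through the
 * def_domain_type op consulted above. Returning 0 means "no preference",
 * in which case the global iommu_def_domain_type applies. The quirk check
 * my_device_needs_identity_map() is made up:
 *
 *	static int my_def_domain_type(struct device *dev)
 *	{
 *		if (my_device_needs_identity_map(dev))
 *			return IOMMU_DOMAIN_IDENTITY;
 *		return 0;
 *	}
 */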
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int __iommu_group_dma_first_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_first_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}
static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);

		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}

		iommu_group_create_direct_mappings(group);

		ret = __iommu_group_dma_first_attach(group);

		mutex_unlock(&group->mutex);

		if (ret)
			break;

		__iommu_group_dma_finalize(group);
	}

	return ret;
}

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev->iommu || !dev->iommu->iommu_dev)
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
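/*
 * Example (editorial sketch, illustrative only): callers typically use
 * device_iommu_capable() to gate optional behaviour, e.g. to decide
 * whether a cache-coherent DMA path can be used; the helper name below is
 * made up:
 *
 *	if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
 *		my_enable_coherent_path(dev);
 */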
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
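/*
 * Example (editorial sketch, illustrative only): installing a report-only
 * handler on an unmanaged domain; my_fault() is a made-up name, and the
 * non-zero return signals that the fault was not handled, so the IOMMU
 * driver may fall back to its default reporting:
 *
 *	static int my_fault(struct iommu_domain *domain, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "unexpected fault at IOVA %lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, NULL);
 */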
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	iommu_put_dma_cookie(domain);
	domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
	struct iommu_domain *new_domain;
	int ret;

	if (group->owner)
		new_domain = group->blocking_domain;
	else
		new_domain = group->default_domain;

	ret = __iommu_group_set_domain(group, new_domain);
	WARN(ret, "iommu driver failed to attach the default/blocking domain");
}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (ret)
		return ret;
	dev->iommu->attach_deferred = 0;
	trace_attach_device_to_domain(dev);
	return 0;
}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that the current configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	if (dev->iommu && dev->iommu->attach_deferred)
		return __iommu_attach_device(domain, dev);

	return 0;
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (WARN_ON(domain != group->domain) ||
	    WARN_ON(iommu_group_device_count(group) != 1))
		goto out_unlock;
	__iommu_group_set_core_domain(group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For use by IOMMU_DOMAIN_DMA implementations, which already provide their
 * own guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}
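/*
 * Example (editorial sketch, illustrative only): the classic
 * unmanaged-domain lifecycle built from the primitives above, as used by
 * callers such as VFIO. Error handling is elided, and
 * iommu_attach_device() works for singleton groups only; multi-device
 * groups must use iommu_attach_group() instead:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
 *		  GFP_KERNEL);
 *	...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */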
/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct,
 * return that default domain directly.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->domain && group->domain != group->default_domain &&
	    group->domain != group->blocking_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

/**
 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the group. In this case attaching a different domain to the
 * group may succeed.
 */
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
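/*
 * Editor's illustrative sketch, not part of the upstream file: the
 * group-level equivalent of the device attach above, as used by callers
 * such as VFIO that operate on whole groups. Names are hypothetical.
 */
static int __maybe_unused example_group_attach(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret;

	if (!group)
		return -ENODEV;

	ret = iommu_attach_group(domain, group);
	if (!ret)
		iommu_detach_group(domain, group);

	iommu_group_put(group);
	return ret;
}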
static int iommu_group_do_set_platform_dma(struct device *dev, void *data)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (!WARN_ON(!ops->set_platform_dma_ops))
		ops->set_platform_dma_ops(dev);

	return 0;
}

static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	int ret;

	if (group->domain == new_domain)
		return 0;

	/*
	 * New drivers should support default domains, so set_platform_dma()
	 * op will never be called. Otherwise the NULL domain represents some
	 * platform specific behavior.
	 */
	if (!new_domain) {
		__iommu_group_for_each_dev(group, NULL,
					   iommu_group_do_set_platform_dma);
		group->domain = NULL;
		return 0;
	}

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain. This switch does not have to be atomic and DMA can be
	 * discarded during the transition. DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 *
	 * Note that this is called in error unwind paths; attaching to a
	 * domain that has already been attached cannot fail.
	 */
	ret = __iommu_group_for_each_dev(group, new_domain,
					 iommu_group_do_attach_device);
	if (ret)
		return ret;
	group->domain = new_domain;
	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
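/*
 * Editor's illustrative sketch, not part of the upstream file: verifying a
 * translation with iommu_iova_to_phys(). For identity domains the IOVA is
 * returned unchanged and blocked domains yield 0, per the checks above.
 */
static void __maybe_unused example_check_translation(struct iommu_domain *domain,
						     dma_addr_t iova,
						     phys_addr_t expected)
{
	phys_addr_t phys = iommu_iova_to_phys(domain, iova);

	WARN(phys != expected, "iova %pad maps to %pa, expected %pa\n",
	     &iova, &phys, &expected);
}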
static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot,
			     gfp_t gfp, size_t *mapped)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;
	int ret;

	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
		 iova, &paddr, pgsize, count);

	if (ops->map_pages) {
		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
				     gfp, mapped);
	} else {
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
		*mapped = ret ? 0 : pgsize;
	}

	return ret;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(!(ops->map || ops->map_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t mapped = 0;

		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
					&mapped);
		/*
		 * Some pages may have been mapped, even if an error occurred,
		 * so we should account for those so they can be unmapped.
		 */
		size -= mapped;

		if (ret)
			break;

		iova += mapped;
		paddr += mapped;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Discourage passing strange GFP flags */
	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
				__GFP_HIGHMEM)))
		return -EINVAL;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
	if (ret == 0 && ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
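/*
 * Editor's illustrative sketch, not part of the upstream file: mapping one
 * page. IOVA, physical address and size must be aligned to the domain's
 * minimum page size, and the GFP check in iommu_map() above rules out
 * __GFP_COMP/__GFP_DMA/__GFP_DMA32/__GFP_HIGHMEM.
 */
static int __maybe_unused example_map_page(struct iommu_domain *domain,
					   unsigned long iova,
					   struct page *page)
{
	return iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
			 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
}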
static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
	return ops->unmap_pages ?
	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
	       ops->unmap(domain, iova, pgsize, iotlb_gather);
}

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __iommu_unmap_pages(domain, iova,
						    size - unmapped,
						    iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
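/*
 * Editor's illustrative sketch, not part of the upstream file: batching
 * several unmaps behind a single IOTLB flush with iommu_unmap_fast(), which
 * is exactly what iommu_unmap() does above for the single-range case.
 */
static size_t __maybe_unused example_unmap_batch(struct iommu_domain *domain,
						 unsigned long *iovas,
						 size_t *sizes, int n)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped = 0;
	int i;

	iommu_iotlb_gather_init(&gather);
	for (i = 0; i < n; i++)
		unmapped += iommu_unmap_fast(domain, iovas[i], sizes[i],
					     &gather);
	iommu_iotlb_sync(domain, &gather);	/* one flush for all ranges */

	return unmapped;
}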
ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		     struct scatterlist *sg, unsigned int nents, int prot,
		     gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Discourage passing strange GFP flags */
	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
				__GFP_HIGHMEM)))
		return -EINVAL;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);
			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (sg_is_dma_bus_address(sg))
			goto next;

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

next:
		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - restarting the faulting device, if required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_enable_nesting(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->enable_nesting)
		return -EINVAL;
	return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
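/*
 * Editor's illustrative sketch, not part of the upstream file: a handler
 * reached via report_iommu_fault() above, installed with
 * iommu_set_fault_handler(). Returning -ENOSYS keeps the driver's default
 * behaviour, as the kernel-doc for report_iommu_fault() describes.
 */
static int __maybe_unused example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova, int flags,
						void *token)
{
	dev_err_ratelimited(dev, "IOMMU %s fault at 0x%lx\n",
			    (flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			    iova);
	return -ENOSYS;	/* fall back to the driver's default handling */
}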
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

/**
 * iommu_put_resv_regions - release reserved regions
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * This releases a reserved region list acquired by iommu_get_resv_regions().
 */
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list) {
		if (entry->free)
			entry->free(dev, entry);
		else
			kfree(entry);
	}
}
EXPORT_SYMBOL(iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type,
						  gfp_t gfp)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), gfp);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
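/*
 * Editor's illustrative sketch, not part of the upstream file: how firmware
 * code typically uses the fwspec helpers above, as the OF/ACPI IOMMU
 * bindings do. The stream ID value is hypothetical.
 */
static int __maybe_unused example_fwspec_setup(struct device *dev,
					       struct fwnode_handle *iommu_fwnode)
{
	const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
	u32 sid = 0x42;	/* hypothetical stream ID from firmware tables */
	int ret;

	if (!ops)
		return -EPROBE_DEFER;	/* IOMMU driver not registered yet */

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	ret = iommu_fwspec_add_ids(dev, &sid, 1);
	if (ret)
		iommu_fwspec_free(dev);
	return ret;
}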
/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
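/*
 * Editor's illustrative sketch, not part of the upstream file: the
 * enable/disable pairing for the per-device features above, here for SVA.
 * Drivers are expected to quiesce use of the feature before disabling it.
 */
static int __maybe_unused example_use_sva_feature(struct device *dev)
{
	int ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);

	if (ret)
		return ret;	/* -ENODEV if the driver lacks the hook */

	/* ... bind process address spaces and do work here ... */

	return iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}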
/*
 * Changes the default domain of an iommu group that has *only* one device
 *
 * @group: The group for which the default domain should be changed
 * @prev_dev: The device in the group (this is used to make sure that the
 *	      device hasn't changed after the caller has called this function)
 * @type: The type of the new default domain that gets associated with the group
 *
 * Returns 0 on success and error code on failure
 *
 * Note: Presently, this function is called only when the user requests to
 * change the group's default domain type through
 * /sys/kernel/iommu_groups/<grp_id>/type. Please take a closer look if it is
 * intended to be used for other purposes.
 */
static int iommu_change_dev_def_domain(struct iommu_group *group,
				       struct device *prev_dev, int type)
{
	struct iommu_domain *prev_dom;
	struct group_device *grp_dev;
	int ret, dev_def_dom;
	struct device *dev;

	mutex_lock(&group->mutex);

	if (group->default_domain != group->domain) {
		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * The iommu group wasn't locked while acquiring the device lock in
	 * iommu_group_store_type(). So, make sure that the device count hasn't
	 * changed while acquiring the device lock.
	 *
	 * Changing the default domain of an iommu group with two or more
	 * devices isn't supported because there could be a potential deadlock.
	 * Consider the following scenario: T1 is trying to acquire the device
	 * locks of all the devices in the group, and before it can acquire all
	 * of them, another thread T2 (from a different sub-system and use
	 * case) may have already acquired some of the device locks and could
	 * be waiting for T1 to release other device locks.
	 */
	if (iommu_group_device_count(group) != 1) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;

	if (prev_dev != dev) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
		ret = -EBUSY;
		goto out;
	}

	prev_dom = group->default_domain;
	if (!prev_dom) {
		ret = -EINVAL;
		goto out;
	}

	dev_def_dom = iommu_get_def_domain_type(dev);
	if (!type) {
		/*
		 * If the user hasn't requested any specific type of domain and
		 * if the device supports both the domains, then default to the
		 * domain the device was booted with
		 */
		type = dev_def_dom ? : iommu_def_domain_type;
	} else if (dev_def_dom && type != dev_def_dom) {
		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
				    iommu_domain_type_str(type));
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Switch to a new domain only if the requested domain type is different
	 * from the existing default domain type
	 */
	if (prev_dom->type == type) {
		ret = 0;
		goto out;
	}

	/* We can bring up a flush queue without tearing down the domain */
	if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(prev_dom);
		if (!ret)
			prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
		goto out;
	}

	/* Sets group->default_domain to the newly allocated domain */
	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
	if (ret)
		goto out;

	ret = iommu_create_device_direct_mappings(group, dev);
	if (ret)
		goto free_new_domain;

	ret = __iommu_attach_device(group->default_domain, dev);
	if (ret)
		goto free_new_domain;

	group->domain = group->default_domain;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
	 * in-turn might call back into IOMMU core code, where it tries to take
	 * group->mutex, resulting in a deadlock.
	 */
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	iommu_group_do_probe_finalize(dev, group->default_domain);
	iommu_domain_free(prev_dom);
	return 0;

free_new_domain:
	iommu_domain_free(group->default_domain);
	group->default_domain = prev_dom;
	group->domain = prev_dom;

out:
	mutex_unlock(&group->mutex);

	return ret;
}
/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * device_lock(dev) is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *grp_dev;
	struct device *dev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group) || !group->default_domain)
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	/*
	 * Lock/Unlock the group mutex here before device lock to
	 * 1. Make sure that the iommu group has only one device (this is a
	 *    prerequisite for step 2)
	 * 2. Get struct *dev which is needed to lock device
	 */
	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		mutex_unlock(&group->mutex);
		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
		return -EINVAL;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;
	get_device(dev);

	/*
	 * Don't hold the group mutex because taking the group mutex first and
	 * then the device lock could potentially cause a deadlock as below.
	 * Assume two threads, T1 and T2. T1 is trying to change the default
	 * domain of an iommu group and T2 is trying to hot unplug a device or
	 * release [1] a VF of a PCIe device which is in the same iommu group.
	 * T1 takes the group mutex and, before it can take the device lock,
	 * assume T2 has taken the device lock and is yet to take the group
	 * mutex. Now both threads are waiting for the other to release its
	 * lock. The lock order below avoids this:
	 *		device_lock(dev);
	 *			mutex_lock(&group->mutex);
	 *				iommu_change_dev_def_domain();
	 *			mutex_unlock(&group->mutex);
	 *		device_unlock(dev);
	 *
	 * [1] Typical device release path
	 * device_lock() from device/driver core code
	 *  -> bus_notifier()
	 *   -> iommu_bus_notifier()
	 *    -> iommu_release_device()
	 *     -> ops->release_device() vendor driver calls back iommu core code
	 *      -> mutex_lock() from iommu core code
	 */
	mutex_unlock(&group->mutex);

	/* Check if the device in the group still has a driver bound to it */
	device_lock(dev);
	if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA)) {
		pr_err_ratelimited("Device is still bound to driver\n");
		ret = -EBUSY;
		goto out;
	}

	ret = iommu_change_dev_def_domain(group, dev, req_type);
	ret = ret ?: count;

out:
	device_unlock(dev);
	put_device(dev);

	return ret;
}
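/*
 * Editor's note, not part of the upstream file: the store handler above
 * backs /sys/kernel/iommu_groups/<grp_id>/type. With the group's single
 * device unbound from its driver, an administrator can switch the default
 * domain, e.g. (the group number is an arbitrary example):
 *
 *	echo DMA-FQ > /sys/kernel/iommu_groups/7/type
 *
 * "identity", "DMA", "DMA-FQ" and "auto" are accepted; only the
 * DMA -> DMA-FQ transition is allowed while a driver is still bound.
 */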
static bool iommu_is_default_domain(struct iommu_group *group)
{
	if (group->domain == group->default_domain)
		return true;

	/*
	 * If the default domain was set to identity and it is still an identity
	 * domain then we consider this a pass. This happens because of
	 * amd_iommu_init_device() replacing the default identity domain with an
	 * identity domain that has a different configuration for AMDGPU.
	 */
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
		return true;
	return false;
}

/**
 * iommu_device_use_default_domain() - Device driver wants to handle device
 *                                     DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver about to bind @dev wants to do DMA through the kernel
 * DMA API. Return 0 if it is allowed, otherwise an error.
 */
int iommu_device_use_default_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret = 0;

	if (!group)
		return 0;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		if (group->owner || !iommu_is_default_domain(group) ||
		    !xa_empty(&group->pasid_array)) {
			ret = -EBUSY;
			goto unlock_out;
		}
	}

	group->owner_cnt++;

unlock_out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

/**
 * iommu_device_unuse_default_domain() - Device driver stops handling device
 *                                       DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver doesn't want to do DMA through kernel DMA API anymore.
 * It must be called after iommu_device_use_default_domain().
 */
void iommu_device_unuse_default_domain(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
		group->owner_cnt--;

	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}

static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
	struct group_device *dev =
		list_first_entry(&group->devices, struct group_device, list);

	if (group->blocking_domain)
		return 0;

	group->blocking_domain =
		__iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
	if (!group->blocking_domain) {
		/*
		 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED,
		 * create an empty domain instead.
		 */
		group->blocking_domain = __iommu_domain_alloc(
			dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
		if (!group->blocking_domain)
			return -EINVAL;
	}
	return 0;
}

static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
{
	int ret;

	if ((group->domain && group->domain != group->default_domain) ||
	    !xa_empty(&group->pasid_array))
		return -EBUSY;

	ret = __iommu_group_alloc_blocking_domain(group);
	if (ret)
		return ret;
	ret = __iommu_group_set_domain(group, group->blocking_domain);
	if (ret)
		return ret;

	group->owner = owner;
	group->owner_cnt++;
	return 0;
}
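/*
 * Editor's illustrative sketch, not part of the upstream file: how a driver
 * bind path pairs the two default-domain calls above around kernel-DMA-API
 * usage. In practice the driver core does this, not individual drivers.
 */
static int __maybe_unused example_bind_for_kernel_dma(struct device *dev)
{
	int ret = iommu_device_use_default_domain(dev);

	if (ret)
		return ret;	/* group is claimed for user-space DMA */

	/* ... driver bound, device does DMA via the DMA API ... */

	iommu_device_unuse_default_domain(dev);
	return 0;
}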
/**
 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
 * @group: The group.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * This is to support backward compatibility for vfio which manages the dma
 * ownership in iommu_group level. New invocations on this interface should be
 * prohibited. Only a single owner may exist for a group.
 */
int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	int ret = 0;

	if (WARN_ON(!owner))
		return -EINVAL;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		ret = -EPERM;
		goto unlock_out;
	}

	ret = __iommu_take_dma_ownership(group, owner);
unlock_out:
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);

/**
 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
 * @dev: The device.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * Claim the DMA ownership of a device. Multiple devices in the same group may
 * concurrently claim ownership if they present the same owner value. Returns 0
 * on success and error code on failure.
 */
int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret = 0;

	if (!group)
		return -ENODEV;
	if (WARN_ON(!owner))
		return -EINVAL;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		if (group->owner != owner) {
			ret = -EPERM;
			goto unlock_out;
		}
		group->owner_cnt++;
		goto unlock_out;
	}

	ret = __iommu_take_dma_ownership(group, owner);
unlock_out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);

static void __iommu_release_dma_ownership(struct iommu_group *group)
{
	int ret;

	if (WARN_ON(!group->owner_cnt || !group->owner ||
		    !xa_empty(&group->pasid_array)))
		return;

	group->owner_cnt = 0;
	group->owner = NULL;
	ret = __iommu_group_set_domain(group, group->default_domain);
	WARN(ret, "iommu driver failed to attach the default domain");
}

/**
 * iommu_group_release_dma_owner() - Release DMA ownership of a group
 * @group: The group.
 *
 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
 */
void iommu_group_release_dma_owner(struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
 * iommu_device_release_dma_owner() - Release DMA ownership of a device
 * @dev: The device.
 *
 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
 */
void iommu_device_release_dma_owner(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);

	mutex_lock(&group->mutex);
	if (group->owner_cnt > 1)
		group->owner_cnt--;
	else
		__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
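/*
 * Editor's illustrative sketch, not part of the upstream file: claiming
 * exclusive DMA ownership before handing a device to user space, as VFIO
 * does. The owner cookie just needs to be a stable, unique pointer.
 */
static int __maybe_unused example_claim_for_userspace(struct device *dev,
						      void *owner_cookie)
{
	int ret = iommu_device_claim_dma_owner(dev, owner_cookie);

	if (ret)
		return ret;	/* -EPERM: someone else owns the group */

	/* ... device is now isolated in the blocking domain ... */

	iommu_device_release_dma_owner(dev);
	return 0;
}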
/**
 * iommu_group_dma_owner_claimed() - Query group dma ownership status
 * @group: The group.
 *
 * This provides status query on a given group. It is racy and only for
 * non-binding status reporting.
 */
bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	unsigned int user;

	mutex_lock(&group->mutex);
	user = group->owner_cnt;
	mutex_unlock(&group->mutex);

	return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);

static int __iommu_set_group_pasid(struct iommu_domain *domain,
				   struct iommu_group *group, ioasid_t pasid)
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
		if (ret)
			break;
	}

	return ret;
}

static void __iommu_remove_group_pasid(struct iommu_group *group,
				       ioasid_t pasid)
{
	struct group_device *device;
	const struct iommu_ops *ops;

	list_for_each_entry(device, &group->devices, list) {
		ops = dev_iommu_ops(device->dev);
		ops->remove_dev_pasid(device->dev, pasid);
	}
}

/**
 * iommu_attach_device_pasid() - Attach a domain to pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * Return: 0 on success, or an error.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid)
{
	struct iommu_group *group;
	void *curr;
	int ret;

	if (!domain->ops->set_dev_pasid)
		return -EOPNOTSUPP;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	mutex_lock(&group->mutex);
	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
	if (curr) {
		ret = xa_err(curr) ? : -EBUSY;
		goto out_unlock;
	}

	ret = __iommu_set_group_pasid(domain, group, pasid);
	if (ret) {
		__iommu_remove_group_pasid(group, pasid);
		xa_erase(&group->pasid_array, pasid);
	}
out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);

/**
 * iommu_detach_device_pasid() - Detach the domain from pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * The @domain must have been attached to @pasid of the @dev with
 * iommu_attach_device_pasid().
 */
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
			       ioasid_t pasid)
{
	struct iommu_group *group = iommu_group_get(dev);

	mutex_lock(&group->mutex);
	__iommu_remove_group_pasid(group, pasid);
	WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
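/*
 * Editor's illustrative sketch, not part of the upstream file: attaching a
 * domain to one PASID of a device with the helpers above. The PASID value
 * is hypothetical and would normally come from an allocator.
 */
static int __maybe_unused example_pasid_attach(struct iommu_domain *domain,
					       struct device *dev)
{
	ioasid_t pasid = 1;	/* hypothetical, normally allocated */
	int ret;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		return ret;	/* -EBUSY if the PASID is already in use */

	/* ... DMA tagged with this PASID now translates through @domain ... */

	iommu_detach_device_pasid(domain, dev, pasid);
	return 0;
}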
/**
 * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
 * @dev: the queried device
 * @pasid: the pasid of the device
 * @type: matched domain type, 0 for any match
 *
 * This is a variant of iommu_get_domain_for_dev(). It returns the existing
 * domain attached to pasid of a device. Callers must hold a lock around this
 * function, and both iommu_attach/detach_dev_pasid() whenever a domain of
 * the given type is being manipulated. This API does not internally resolve
 * races with attach/detach.
 *
 * Return: attached domain on success, NULL otherwise.
 */
struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
						    ioasid_t pasid,
						    unsigned int type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	xa_lock(&group->pasid_array);
	domain = xa_load(&group->pasid_array, pasid);
	if (type && domain && domain->type != type)
		domain = ERR_PTR(-EBUSY);
	xa_unlock(&group->pasid_array);
	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->iopf_handler = iommu_sva_handle_iopf;
	domain->fault_data = mm;

	return domain;
}
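/*
 * Editor's illustrative sketch, not part of the upstream file: combining
 * iommu_sva_domain_alloc() with the PASID attach API so that DMA tagged
 * with @pasid shares @mm's page tables. iommu-sva.c wraps this pattern in
 * iommu_sva_bind_device().
 */
static struct iommu_domain *__maybe_unused
example_sva_attach(struct device *dev, struct mm_struct *mm, ioasid_t pasid)
{
	struct iommu_domain *domain = iommu_sva_domain_alloc(dev, mm);

	if (!domain)
		return NULL;

	if (iommu_attach_device_pasid(domain, dev, pasid)) {
		iommu_domain_free(domain);	/* drops the mmgrab() above */
		return NULL;
	}
	return domain;
}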