1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/device.h> 10 #include <linux/dma-iommu.h> 11 #include <linux/kernel.h> 12 #include <linux/bits.h> 13 #include <linux/bug.h> 14 #include <linux/types.h> 15 #include <linux/init.h> 16 #include <linux/export.h> 17 #include <linux/slab.h> 18 #include <linux/errno.h> 19 #include <linux/iommu.h> 20 #include <linux/idr.h> 21 #include <linux/notifier.h> 22 #include <linux/err.h> 23 #include <linux/pci.h> 24 #include <linux/bitops.h> 25 #include <linux/property.h> 26 #include <linux/fsl/mc.h> 27 #include <linux/module.h> 28 #include <linux/cc_platform.h> 29 #include <trace/events/iommu.h> 30 31 static struct kset *iommu_group_kset; 32 static DEFINE_IDA(iommu_group_ida); 33 34 static unsigned int iommu_def_domain_type __read_mostly; 35 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT); 36 static u32 iommu_cmd_line __read_mostly; 37 38 struct iommu_group { 39 struct kobject kobj; 40 struct kobject *devices_kobj; 41 struct list_head devices; 42 struct mutex mutex; 43 struct blocking_notifier_head notifier; 44 void *iommu_data; 45 void (*iommu_data_release)(void *iommu_data); 46 char *name; 47 int id; 48 struct iommu_domain *default_domain; 49 struct iommu_domain *domain; 50 struct list_head entry; 51 unsigned int owner_cnt; 52 void *owner; 53 }; 54 55 struct group_device { 56 struct list_head list; 57 struct device *dev; 58 char *name; 59 }; 60 61 struct iommu_group_attribute { 62 struct attribute attr; 63 ssize_t (*show)(struct iommu_group *group, char *buf); 64 ssize_t (*store)(struct iommu_group *group, 65 const char *buf, size_t count); 66 }; 67 68 static const char * const iommu_group_resv_type_string[] = { 69 [IOMMU_RESV_DIRECT] = "direct", 70 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 71 [IOMMU_RESV_RESERVED] = "reserved", 72 [IOMMU_RESV_MSI] = "msi", 73 [IOMMU_RESV_SW_MSI] = "msi", 74 }; 75 76 #define IOMMU_CMD_LINE_DMA_API BIT(0) 77 #define IOMMU_CMD_LINE_STRICT BIT(1) 78 79 static int iommu_alloc_default_domain(struct iommu_group *group, 80 struct device *dev); 81 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 82 unsigned type); 83 static int __iommu_attach_device(struct iommu_domain *domain, 84 struct device *dev); 85 static int __iommu_attach_group(struct iommu_domain *domain, 86 struct iommu_group *group); 87 static void __iommu_detach_group(struct iommu_domain *domain, 88 struct iommu_group *group); 89 static int iommu_create_device_direct_mappings(struct iommu_group *group, 90 struct device *dev); 91 static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 92 static ssize_t iommu_group_store_type(struct iommu_group *group, 93 const char *buf, size_t count); 94 95 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 96 struct iommu_group_attribute iommu_group_attr_##_name = \ 97 __ATTR(_name, _mode, _show, _store) 98 99 #define to_iommu_group_attr(_attr) \ 100 container_of(_attr, struct iommu_group_attribute, attr) 101 #define to_iommu_group(_kobj) \ 102 container_of(_kobj, struct iommu_group, kobj) 103 104 static LIST_HEAD(iommu_device_list); 105 static DEFINE_SPINLOCK(iommu_device_lock); 106 107 /* 108 * Use a function instead of an array here because the domain-type is a 109 * bit-field, so an array would waste memory. 
110 */ 111 static const char *iommu_domain_type_str(unsigned int t) 112 { 113 switch (t) { 114 case IOMMU_DOMAIN_BLOCKED: 115 return "Blocked"; 116 case IOMMU_DOMAIN_IDENTITY: 117 return "Passthrough"; 118 case IOMMU_DOMAIN_UNMANAGED: 119 return "Unmanaged"; 120 case IOMMU_DOMAIN_DMA: 121 case IOMMU_DOMAIN_DMA_FQ: 122 return "Translated"; 123 default: 124 return "Unknown"; 125 } 126 } 127 128 static int __init iommu_subsys_init(void) 129 { 130 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { 131 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH)) 132 iommu_set_default_passthrough(false); 133 else 134 iommu_set_default_translated(false); 135 136 if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { 137 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); 138 iommu_set_default_translated(false); 139 } 140 } 141 142 if (!iommu_default_passthrough() && !iommu_dma_strict) 143 iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ; 144 145 pr_info("Default domain type: %s %s\n", 146 iommu_domain_type_str(iommu_def_domain_type), 147 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ? 148 "(set via kernel command line)" : ""); 149 150 if (!iommu_default_passthrough()) 151 pr_info("DMA domain TLB invalidation policy: %s mode %s\n", 152 iommu_dma_strict ? "strict" : "lazy", 153 (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ? 154 "(set via kernel command line)" : ""); 155 156 return 0; 157 } 158 subsys_initcall(iommu_subsys_init); 159 160 /** 161 * iommu_device_register() - Register an IOMMU hardware instance 162 * @iommu: IOMMU handle for the instance 163 * @ops: IOMMU ops to associate with the instance 164 * @hwdev: (optional) actual instance device, used for fwnode lookup 165 * 166 * Return: 0 on success, or an error. 167 */ 168 int iommu_device_register(struct iommu_device *iommu, 169 const struct iommu_ops *ops, struct device *hwdev) 170 { 171 /* We need to be able to take module references appropriately */ 172 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) 173 return -EINVAL; 174 175 iommu->ops = ops; 176 if (hwdev) 177 iommu->fwnode = hwdev->fwnode; 178 179 spin_lock(&iommu_device_lock); 180 list_add_tail(&iommu->list, &iommu_device_list); 181 spin_unlock(&iommu_device_lock); 182 return 0; 183 } 184 EXPORT_SYMBOL_GPL(iommu_device_register); 185 186 void iommu_device_unregister(struct iommu_device *iommu) 187 { 188 spin_lock(&iommu_device_lock); 189 list_del(&iommu->list); 190 spin_unlock(&iommu_device_lock); 191 } 192 EXPORT_SYMBOL_GPL(iommu_device_unregister); 193 194 static struct dev_iommu *dev_iommu_get(struct device *dev) 195 { 196 struct dev_iommu *param = dev->iommu; 197 198 if (param) 199 return param; 200 201 param = kzalloc(sizeof(*param), GFP_KERNEL); 202 if (!param) 203 return NULL; 204 205 mutex_init(¶m->lock); 206 dev->iommu = param; 207 return param; 208 } 209 210 static void dev_iommu_free(struct device *dev) 211 { 212 struct dev_iommu *param = dev->iommu; 213 214 dev->iommu = NULL; 215 if (param->fwspec) { 216 fwnode_handle_put(param->fwspec->iommu_fwnode); 217 kfree(param->fwspec); 218 } 219 kfree(param); 220 } 221 222 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 223 { 224 const struct iommu_ops *ops = dev->bus->iommu_ops; 225 struct iommu_device *iommu_dev; 226 struct iommu_group *group; 227 int ret; 228 229 if (!ops) 230 return -ENODEV; 231 232 if (!dev_iommu_get(dev)) 233 return -ENOMEM; 234 235 if (!try_module_get(ops->owner)) { 236 ret = -EINVAL; 237 goto err_free; 238 } 239 240 iommu_dev = 
ops->probe_device(dev); 241 if (IS_ERR(iommu_dev)) { 242 ret = PTR_ERR(iommu_dev); 243 goto out_module_put; 244 } 245 246 dev->iommu->iommu_dev = iommu_dev; 247 248 group = iommu_group_get_for_dev(dev); 249 if (IS_ERR(group)) { 250 ret = PTR_ERR(group); 251 goto out_release; 252 } 253 iommu_group_put(group); 254 255 if (group_list && !group->default_domain && list_empty(&group->entry)) 256 list_add_tail(&group->entry, group_list); 257 258 iommu_device_link(iommu_dev, dev); 259 260 return 0; 261 262 out_release: 263 ops->release_device(dev); 264 265 out_module_put: 266 module_put(ops->owner); 267 268 err_free: 269 dev_iommu_free(dev); 270 271 return ret; 272 } 273 274 int iommu_probe_device(struct device *dev) 275 { 276 const struct iommu_ops *ops = dev->bus->iommu_ops; 277 struct iommu_group *group; 278 int ret; 279 280 ret = __iommu_probe_device(dev, NULL); 281 if (ret) 282 goto err_out; 283 284 group = iommu_group_get(dev); 285 if (!group) { 286 ret = -ENODEV; 287 goto err_release; 288 } 289 290 /* 291 * Try to allocate a default domain - needs support from the 292 * IOMMU driver. There are still some drivers which don't 293 * support default domains, so the return value is not yet 294 * checked. 295 */ 296 mutex_lock(&group->mutex); 297 iommu_alloc_default_domain(group, dev); 298 299 /* 300 * If device joined an existing group which has been claimed, don't 301 * attach the default domain. 302 */ 303 if (group->default_domain && !group->owner) { 304 ret = __iommu_attach_device(group->default_domain, dev); 305 if (ret) { 306 mutex_unlock(&group->mutex); 307 iommu_group_put(group); 308 goto err_release; 309 } 310 } 311 312 iommu_create_device_direct_mappings(group, dev); 313 314 mutex_unlock(&group->mutex); 315 iommu_group_put(group); 316 317 if (ops->probe_finalize) 318 ops->probe_finalize(dev); 319 320 return 0; 321 322 err_release: 323 iommu_release_device(dev); 324 325 err_out: 326 return ret; 327 328 } 329 330 void iommu_release_device(struct device *dev) 331 { 332 const struct iommu_ops *ops; 333 334 if (!dev->iommu) 335 return; 336 337 iommu_device_unlink(dev->iommu->iommu_dev, dev); 338 339 ops = dev_iommu_ops(dev); 340 ops->release_device(dev); 341 342 iommu_group_remove_device(dev); 343 module_put(ops->owner); 344 dev_iommu_free(dev); 345 } 346 347 static int __init iommu_set_def_domain_type(char *str) 348 { 349 bool pt; 350 int ret; 351 352 ret = kstrtobool(str, &pt); 353 if (ret) 354 return ret; 355 356 if (pt) 357 iommu_set_default_passthrough(true); 358 else 359 iommu_set_default_translated(true); 360 361 return 0; 362 } 363 early_param("iommu.passthrough", iommu_set_def_domain_type); 364 365 static int __init iommu_dma_setup(char *str) 366 { 367 int ret = kstrtobool(str, &iommu_dma_strict); 368 369 if (!ret) 370 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 371 return ret; 372 } 373 early_param("iommu.strict", iommu_dma_setup); 374 375 void iommu_set_dma_strict(void) 376 { 377 iommu_dma_strict = true; 378 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ) 379 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 380 } 381 382 static ssize_t iommu_group_attr_show(struct kobject *kobj, 383 struct attribute *__attr, char *buf) 384 { 385 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 386 struct iommu_group *group = to_iommu_group(kobj); 387 ssize_t ret = -EIO; 388 389 if (attr->show) 390 ret = attr->show(group, buf); 391 return ret; 392 } 393 394 static ssize_t iommu_group_attr_store(struct kobject *kobj, 395 struct attribute *__attr, 396 const char *buf, size_t count) 397 { 
398 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 399 struct iommu_group *group = to_iommu_group(kobj); 400 ssize_t ret = -EIO; 401 402 if (attr->store) 403 ret = attr->store(group, buf, count); 404 return ret; 405 } 406 407 static const struct sysfs_ops iommu_group_sysfs_ops = { 408 .show = iommu_group_attr_show, 409 .store = iommu_group_attr_store, 410 }; 411 412 static int iommu_group_create_file(struct iommu_group *group, 413 struct iommu_group_attribute *attr) 414 { 415 return sysfs_create_file(&group->kobj, &attr->attr); 416 } 417 418 static void iommu_group_remove_file(struct iommu_group *group, 419 struct iommu_group_attribute *attr) 420 { 421 sysfs_remove_file(&group->kobj, &attr->attr); 422 } 423 424 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 425 { 426 return sprintf(buf, "%s\n", group->name); 427 } 428 429 /** 430 * iommu_insert_resv_region - Insert a new region in the 431 * list of reserved regions. 432 * @new: new region to insert 433 * @regions: list of regions 434 * 435 * Elements are sorted by start address and overlapping segments 436 * of the same type are merged. 437 */ 438 static int iommu_insert_resv_region(struct iommu_resv_region *new, 439 struct list_head *regions) 440 { 441 struct iommu_resv_region *iter, *tmp, *nr, *top; 442 LIST_HEAD(stack); 443 444 nr = iommu_alloc_resv_region(new->start, new->length, 445 new->prot, new->type); 446 if (!nr) 447 return -ENOMEM; 448 449 /* First add the new element based on start address sorting */ 450 list_for_each_entry(iter, regions, list) { 451 if (nr->start < iter->start || 452 (nr->start == iter->start && nr->type <= iter->type)) 453 break; 454 } 455 list_add_tail(&nr->list, &iter->list); 456 457 /* Merge overlapping segments of type nr->type in @regions, if any */ 458 list_for_each_entry_safe(iter, tmp, regions, list) { 459 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 460 461 /* no merge needed on elements of different types than @new */ 462 if (iter->type != new->type) { 463 list_move_tail(&iter->list, &stack); 464 continue; 465 } 466 467 /* look for the last stack element of same type as @iter */ 468 list_for_each_entry_reverse(top, &stack, list) 469 if (top->type == iter->type) 470 goto check_overlap; 471 472 list_move_tail(&iter->list, &stack); 473 continue; 474 475 check_overlap: 476 top_end = top->start + top->length - 1; 477 478 if (iter->start > top_end + 1) { 479 list_move_tail(&iter->list, &stack); 480 } else { 481 top->length = max(top_end, iter_end) - top->start + 1; 482 list_del(&iter->list); 483 kfree(iter); 484 } 485 } 486 list_splice(&stack, regions); 487 return 0; 488 } 489 490 static int 491 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 492 struct list_head *group_resv_regions) 493 { 494 struct iommu_resv_region *entry; 495 int ret = 0; 496 497 list_for_each_entry(entry, dev_resv_regions, list) { 498 ret = iommu_insert_resv_region(entry, group_resv_regions); 499 if (ret) 500 break; 501 } 502 return ret; 503 } 504 505 int iommu_get_group_resv_regions(struct iommu_group *group, 506 struct list_head *head) 507 { 508 struct group_device *device; 509 int ret = 0; 510 511 mutex_lock(&group->mutex); 512 list_for_each_entry(device, &group->devices, list) { 513 struct list_head dev_resv_regions; 514 515 INIT_LIST_HEAD(&dev_resv_regions); 516 iommu_get_resv_regions(device->dev, &dev_resv_regions); 517 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 518 iommu_put_resv_regions(device->dev, &dev_resv_regions); 
519 if (ret) 520 break; 521 } 522 mutex_unlock(&group->mutex); 523 return ret; 524 } 525 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 526 527 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 528 char *buf) 529 { 530 struct iommu_resv_region *region, *next; 531 struct list_head group_resv_regions; 532 char *str = buf; 533 534 INIT_LIST_HEAD(&group_resv_regions); 535 iommu_get_group_resv_regions(group, &group_resv_regions); 536 537 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 538 str += sprintf(str, "0x%016llx 0x%016llx %s\n", 539 (long long int)region->start, 540 (long long int)(region->start + 541 region->length - 1), 542 iommu_group_resv_type_string[region->type]); 543 kfree(region); 544 } 545 546 return (str - buf); 547 } 548 549 static ssize_t iommu_group_show_type(struct iommu_group *group, 550 char *buf) 551 { 552 char *type = "unknown\n"; 553 554 mutex_lock(&group->mutex); 555 if (group->default_domain) { 556 switch (group->default_domain->type) { 557 case IOMMU_DOMAIN_BLOCKED: 558 type = "blocked\n"; 559 break; 560 case IOMMU_DOMAIN_IDENTITY: 561 type = "identity\n"; 562 break; 563 case IOMMU_DOMAIN_UNMANAGED: 564 type = "unmanaged\n"; 565 break; 566 case IOMMU_DOMAIN_DMA: 567 type = "DMA\n"; 568 break; 569 case IOMMU_DOMAIN_DMA_FQ: 570 type = "DMA-FQ\n"; 571 break; 572 } 573 } 574 mutex_unlock(&group->mutex); 575 strcpy(buf, type); 576 577 return strlen(type); 578 } 579 580 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 581 582 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 583 iommu_group_show_resv_regions, NULL); 584 585 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 586 iommu_group_store_type); 587 588 static void iommu_group_release(struct kobject *kobj) 589 { 590 struct iommu_group *group = to_iommu_group(kobj); 591 592 pr_debug("Releasing group %d\n", group->id); 593 594 if (group->iommu_data_release) 595 group->iommu_data_release(group->iommu_data); 596 597 ida_simple_remove(&iommu_group_ida, group->id); 598 599 if (group->default_domain) 600 iommu_domain_free(group->default_domain); 601 602 kfree(group->name); 603 kfree(group); 604 } 605 606 static struct kobj_type iommu_group_ktype = { 607 .sysfs_ops = &iommu_group_sysfs_ops, 608 .release = iommu_group_release, 609 }; 610 611 /** 612 * iommu_group_alloc - Allocate a new group 613 * 614 * This function is called by an iommu driver to allocate a new iommu 615 * group. The iommu group represents the minimum granularity of the iommu. 616 * Upon successful return, the caller holds a reference to the supplied 617 * group in order to hold the group until devices are added. Use 618 * iommu_group_put() to release this extra reference count, allowing the 619 * group to be automatically reclaimed once it has no devices or external 620 * references. 
621 */ 622 struct iommu_group *iommu_group_alloc(void) 623 { 624 struct iommu_group *group; 625 int ret; 626 627 group = kzalloc(sizeof(*group), GFP_KERNEL); 628 if (!group) 629 return ERR_PTR(-ENOMEM); 630 631 group->kobj.kset = iommu_group_kset; 632 mutex_init(&group->mutex); 633 INIT_LIST_HEAD(&group->devices); 634 INIT_LIST_HEAD(&group->entry); 635 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); 636 637 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); 638 if (ret < 0) { 639 kfree(group); 640 return ERR_PTR(ret); 641 } 642 group->id = ret; 643 644 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 645 NULL, "%d", group->id); 646 if (ret) { 647 ida_simple_remove(&iommu_group_ida, group->id); 648 kobject_put(&group->kobj); 649 return ERR_PTR(ret); 650 } 651 652 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 653 if (!group->devices_kobj) { 654 kobject_put(&group->kobj); /* triggers .release & free */ 655 return ERR_PTR(-ENOMEM); 656 } 657 658 /* 659 * The devices_kobj holds a reference on the group kobject, so 660 * as long as that exists so will the group. We can therefore 661 * use the devices_kobj for reference counting. 662 */ 663 kobject_put(&group->kobj); 664 665 ret = iommu_group_create_file(group, 666 &iommu_group_attr_reserved_regions); 667 if (ret) 668 return ERR_PTR(ret); 669 670 ret = iommu_group_create_file(group, &iommu_group_attr_type); 671 if (ret) 672 return ERR_PTR(ret); 673 674 pr_debug("Allocated group %d\n", group->id); 675 676 return group; 677 } 678 EXPORT_SYMBOL_GPL(iommu_group_alloc); 679 680 struct iommu_group *iommu_group_get_by_id(int id) 681 { 682 struct kobject *group_kobj; 683 struct iommu_group *group; 684 const char *name; 685 686 if (!iommu_group_kset) 687 return NULL; 688 689 name = kasprintf(GFP_KERNEL, "%d", id); 690 if (!name) 691 return NULL; 692 693 group_kobj = kset_find_obj(iommu_group_kset, name); 694 kfree(name); 695 696 if (!group_kobj) 697 return NULL; 698 699 group = container_of(group_kobj, struct iommu_group, kobj); 700 BUG_ON(group->id != id); 701 702 kobject_get(group->devices_kobj); 703 kobject_put(&group->kobj); 704 705 return group; 706 } 707 EXPORT_SYMBOL_GPL(iommu_group_get_by_id); 708 709 /** 710 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 711 * @group: the group 712 * 713 * iommu drivers can store data in the group for use when doing iommu 714 * operations. This function provides a way to retrieve it. Caller 715 * should hold a group reference. 716 */ 717 void *iommu_group_get_iommudata(struct iommu_group *group) 718 { 719 return group->iommu_data; 720 } 721 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 722 723 /** 724 * iommu_group_set_iommudata - set iommu_data for a group 725 * @group: the group 726 * @iommu_data: new data 727 * @release: release function for iommu_data 728 * 729 * iommu drivers can store data in the group for use when doing iommu 730 * operations. This function provides a way to set the data after 731 * the group has been allocated. Caller should hold a group reference. 732 */ 733 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 734 void (*release)(void *iommu_data)) 735 { 736 group->iommu_data = iommu_data; 737 group->iommu_data_release = release; 738 } 739 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 740 741 /** 742 * iommu_group_set_name - set name for a group 743 * @group: the group 744 * @name: name 745 * 746 * Allow iommu driver to set a name for a group. 
When set it will 747 * appear in a name attribute file under the group in sysfs. 748 */ 749 int iommu_group_set_name(struct iommu_group *group, const char *name) 750 { 751 int ret; 752 753 if (group->name) { 754 iommu_group_remove_file(group, &iommu_group_attr_name); 755 kfree(group->name); 756 group->name = NULL; 757 if (!name) 758 return 0; 759 } 760 761 group->name = kstrdup(name, GFP_KERNEL); 762 if (!group->name) 763 return -ENOMEM; 764 765 ret = iommu_group_create_file(group, &iommu_group_attr_name); 766 if (ret) { 767 kfree(group->name); 768 group->name = NULL; 769 return ret; 770 } 771 772 return 0; 773 } 774 EXPORT_SYMBOL_GPL(iommu_group_set_name); 775 776 static int iommu_create_device_direct_mappings(struct iommu_group *group, 777 struct device *dev) 778 { 779 struct iommu_domain *domain = group->default_domain; 780 struct iommu_resv_region *entry; 781 struct list_head mappings; 782 unsigned long pg_size; 783 int ret = 0; 784 785 if (!domain || !iommu_is_dma_domain(domain)) 786 return 0; 787 788 BUG_ON(!domain->pgsize_bitmap); 789 790 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 791 INIT_LIST_HEAD(&mappings); 792 793 iommu_get_resv_regions(dev, &mappings); 794 795 /* We need to consider overlapping regions for different devices */ 796 list_for_each_entry(entry, &mappings, list) { 797 dma_addr_t start, end, addr; 798 size_t map_size = 0; 799 800 start = ALIGN(entry->start, pg_size); 801 end = ALIGN(entry->start + entry->length, pg_size); 802 803 if (entry->type != IOMMU_RESV_DIRECT && 804 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 805 continue; 806 807 for (addr = start; addr <= end; addr += pg_size) { 808 phys_addr_t phys_addr; 809 810 if (addr == end) 811 goto map_end; 812 813 phys_addr = iommu_iova_to_phys(domain, addr); 814 if (!phys_addr) { 815 map_size += pg_size; 816 continue; 817 } 818 819 map_end: 820 if (map_size) { 821 ret = iommu_map(domain, addr - map_size, 822 addr - map_size, map_size, 823 entry->prot); 824 if (ret) 825 goto out; 826 map_size = 0; 827 } 828 } 829 830 } 831 832 iommu_flush_iotlb_all(domain); 833 834 out: 835 iommu_put_resv_regions(dev, &mappings); 836 837 return ret; 838 } 839 840 static bool iommu_is_attach_deferred(struct device *dev) 841 { 842 const struct iommu_ops *ops = dev_iommu_ops(dev); 843 844 if (ops->is_attach_deferred) 845 return ops->is_attach_deferred(dev); 846 847 return false; 848 } 849 850 /** 851 * iommu_group_add_device - add a device to an iommu group 852 * @group: the group into which to add the device (reference should be held) 853 * @dev: the device 854 * 855 * This function is called by an iommu driver to add a device into a 856 * group. Adding a device increments the group reference count. 857 */ 858 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 859 { 860 int ret, i = 0; 861 struct group_device *device; 862 863 device = kzalloc(sizeof(*device), GFP_KERNEL); 864 if (!device) 865 return -ENOMEM; 866 867 device->dev = dev; 868 869 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 870 if (ret) 871 goto err_free_device; 872 873 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 874 rename: 875 if (!device->name) { 876 ret = -ENOMEM; 877 goto err_remove_link; 878 } 879 880 ret = sysfs_create_link_nowarn(group->devices_kobj, 881 &dev->kobj, device->name); 882 if (ret) { 883 if (ret == -EEXIST && i >= 0) { 884 /* 885 * Account for the slim chance of collision 886 * and append an instance to the name. 
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
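
/*
 * Illustrative sketch (not part of this file): how a hypothetical IOMMU
 * driver might pair the add/remove helpers above from its own probe and
 * remove paths.  The names my_probe_device()/my_remove_device() are made
 * up for the example and error handling is abbreviated.
 */
#if 0
static int my_probe_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();		/* one group per device */
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);			/* drop the allocation reference */
	return ret;
}

static void my_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
#endif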

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
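
/*
 * Illustrative sketch (not part of this file): registering a group
 * notifier to watch device add/remove events on a group.  The callback
 * and notifier_block names are hypothetical.
 */
#if 0
static int my_group_notify(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "joined the watched iommu group\n");
	else if (action == IOMMU_GROUP_NOTIFY_DEL_DEVICE)
		dev_info(dev, "left the watched iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block my_group_nb = {
	.notifier_call = my_group_notify,
};

/* ... iommu_group_register_notifier(group, &my_group_nb); ... */
#endif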
If 1116 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also 1117 * complete the fault by calling iommu_page_response() with one of the following 1118 * response code: 1119 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation 1120 * - IOMMU_PAGE_RESP_INVALID: terminate the fault 1121 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting 1122 * page faults if possible. 1123 * 1124 * Return 0 if the fault handler was installed successfully, or an error. 1125 */ 1126 int iommu_register_device_fault_handler(struct device *dev, 1127 iommu_dev_fault_handler_t handler, 1128 void *data) 1129 { 1130 struct dev_iommu *param = dev->iommu; 1131 int ret = 0; 1132 1133 if (!param) 1134 return -EINVAL; 1135 1136 mutex_lock(¶m->lock); 1137 /* Only allow one fault handler registered for each device */ 1138 if (param->fault_param) { 1139 ret = -EBUSY; 1140 goto done_unlock; 1141 } 1142 1143 get_device(dev); 1144 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL); 1145 if (!param->fault_param) { 1146 put_device(dev); 1147 ret = -ENOMEM; 1148 goto done_unlock; 1149 } 1150 param->fault_param->handler = handler; 1151 param->fault_param->data = data; 1152 mutex_init(¶m->fault_param->lock); 1153 INIT_LIST_HEAD(¶m->fault_param->faults); 1154 1155 done_unlock: 1156 mutex_unlock(¶m->lock); 1157 1158 return ret; 1159 } 1160 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); 1161 1162 /** 1163 * iommu_unregister_device_fault_handler() - Unregister the device fault handler 1164 * @dev: the device 1165 * 1166 * Remove the device fault handler installed with 1167 * iommu_register_device_fault_handler(). 1168 * 1169 * Return 0 on success, or an error. 1170 */ 1171 int iommu_unregister_device_fault_handler(struct device *dev) 1172 { 1173 struct dev_iommu *param = dev->iommu; 1174 int ret = 0; 1175 1176 if (!param) 1177 return -EINVAL; 1178 1179 mutex_lock(¶m->lock); 1180 1181 if (!param->fault_param) 1182 goto unlock; 1183 1184 /* we cannot unregister handler if there are pending faults */ 1185 if (!list_empty(¶m->fault_param->faults)) { 1186 ret = -EBUSY; 1187 goto unlock; 1188 } 1189 1190 kfree(param->fault_param); 1191 param->fault_param = NULL; 1192 put_device(dev); 1193 unlock: 1194 mutex_unlock(¶m->lock); 1195 1196 return ret; 1197 } 1198 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); 1199 1200 /** 1201 * iommu_report_device_fault() - Report fault event to device driver 1202 * @dev: the device 1203 * @evt: fault event data 1204 * 1205 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ 1206 * handler. When this function fails and the fault is recoverable, it is the 1207 * caller's responsibility to complete the fault. 1208 * 1209 * Return 0 on success, or an error. 

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;

	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);
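
/*
 * Illustrative sketch (not part of this file): completing a recoverable
 * page request from the consumer's deferred work, using the response codes
 * documented above.  The helper name and field choices are example-only.
 */
#if 0
static void my_complete_page_request(struct device *dev,
				     struct iommu_fault_page_request *prm,
				     bool handled)
{
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.grpid		= prm->grpid,
		.code		= handled ? IOMMU_PAGE_RESP_SUCCESS :
					    IOMMU_PAGE_RESP_INVALID,
	};

	if (prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = prm->pasid;
	}

	iommu_page_response(dev, &resp);
}
#endif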

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}
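
/*
 * Illustrative sketch (not part of this file): the REQ_ACS_FLAGS test above
 * can be used to ask whether a device's whole upstream path is isolated
 * from peer-to-peer DMA.  The helper name is hypothetical.
 */
#if 0
static bool my_dev_is_isolated(struct pci_dev *pdev)
{
	/* Every bridge from the device up to the root must enable full ACS. */
	return pci_acs_path_enabled(pdev, NULL, REQ_ACS_FLAGS);
}
#endif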

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
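
/*
 * Illustrative sketch (not part of this file): a typical driver
 * ->device_group() implementation defers to the helpers above, picking
 * pci_device_group() for PCI devices and generic_device_group() otherwise.
 * The function name is hypothetical.
 */
#if 0
static struct iommu_group *my_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);

	return generic_device_group(dev);
}
#endif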

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;
	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
1588 */ 1589 static struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1590 { 1591 const struct iommu_ops *ops = dev_iommu_ops(dev); 1592 struct iommu_group *group; 1593 int ret; 1594 1595 group = iommu_group_get(dev); 1596 if (group) 1597 return group; 1598 1599 group = ops->device_group(dev); 1600 if (WARN_ON_ONCE(group == NULL)) 1601 return ERR_PTR(-EINVAL); 1602 1603 if (IS_ERR(group)) 1604 return group; 1605 1606 ret = iommu_group_add_device(group, dev); 1607 if (ret) 1608 goto out_put_group; 1609 1610 return group; 1611 1612 out_put_group: 1613 iommu_group_put(group); 1614 1615 return ERR_PTR(ret); 1616 } 1617 1618 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1619 { 1620 return group->default_domain; 1621 } 1622 1623 static int probe_iommu_group(struct device *dev, void *data) 1624 { 1625 struct list_head *group_list = data; 1626 struct iommu_group *group; 1627 int ret; 1628 1629 /* Device is probed already if in a group */ 1630 group = iommu_group_get(dev); 1631 if (group) { 1632 iommu_group_put(group); 1633 return 0; 1634 } 1635 1636 ret = __iommu_probe_device(dev, group_list); 1637 if (ret == -ENODEV) 1638 ret = 0; 1639 1640 return ret; 1641 } 1642 1643 static int remove_iommu_group(struct device *dev, void *data) 1644 { 1645 iommu_release_device(dev); 1646 1647 return 0; 1648 } 1649 1650 static int iommu_bus_notifier(struct notifier_block *nb, 1651 unsigned long action, void *data) 1652 { 1653 unsigned long group_action = 0; 1654 struct device *dev = data; 1655 struct iommu_group *group; 1656 1657 /* 1658 * ADD/DEL call into iommu driver ops if provided, which may 1659 * result in ADD/DEL notifiers to group->notifier 1660 */ 1661 if (action == BUS_NOTIFY_ADD_DEVICE) { 1662 int ret; 1663 1664 ret = iommu_probe_device(dev); 1665 return (ret) ? 
NOTIFY_DONE : NOTIFY_OK; 1666 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { 1667 iommu_release_device(dev); 1668 return NOTIFY_OK; 1669 } 1670 1671 /* 1672 * Remaining BUS_NOTIFYs get filtered and republished to the 1673 * group, if anyone is listening 1674 */ 1675 group = iommu_group_get(dev); 1676 if (!group) 1677 return 0; 1678 1679 switch (action) { 1680 case BUS_NOTIFY_BIND_DRIVER: 1681 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER; 1682 break; 1683 case BUS_NOTIFY_BOUND_DRIVER: 1684 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER; 1685 break; 1686 case BUS_NOTIFY_UNBIND_DRIVER: 1687 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER; 1688 break; 1689 case BUS_NOTIFY_UNBOUND_DRIVER: 1690 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER; 1691 break; 1692 } 1693 1694 if (group_action) 1695 blocking_notifier_call_chain(&group->notifier, 1696 group_action, dev); 1697 1698 iommu_group_put(group); 1699 return 0; 1700 } 1701 1702 struct __group_domain_type { 1703 struct device *dev; 1704 unsigned int type; 1705 }; 1706 1707 static int probe_get_default_domain_type(struct device *dev, void *data) 1708 { 1709 struct __group_domain_type *gtype = data; 1710 unsigned int type = iommu_get_def_domain_type(dev); 1711 1712 if (type) { 1713 if (gtype->type && gtype->type != type) { 1714 dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n", 1715 iommu_domain_type_str(type), 1716 dev_name(gtype->dev), 1717 iommu_domain_type_str(gtype->type)); 1718 gtype->type = 0; 1719 } 1720 1721 if (!gtype->dev) { 1722 gtype->dev = dev; 1723 gtype->type = type; 1724 } 1725 } 1726 1727 return 0; 1728 } 1729 1730 static void probe_alloc_default_domain(struct bus_type *bus, 1731 struct iommu_group *group) 1732 { 1733 struct __group_domain_type gtype; 1734 1735 memset(>ype, 0, sizeof(gtype)); 1736 1737 /* Ask for default domain requirements of all devices in the group */ 1738 __iommu_group_for_each_dev(group, >ype, 1739 probe_get_default_domain_type); 1740 1741 if (!gtype.type) 1742 gtype.type = iommu_def_domain_type; 1743 1744 iommu_group_alloc_default_domain(bus, group, gtype.type); 1745 1746 } 1747 1748 static int iommu_group_do_dma_attach(struct device *dev, void *data) 1749 { 1750 struct iommu_domain *domain = data; 1751 int ret = 0; 1752 1753 if (!iommu_is_attach_deferred(dev)) 1754 ret = __iommu_attach_device(domain, dev); 1755 1756 return ret; 1757 } 1758 1759 static int __iommu_group_dma_attach(struct iommu_group *group) 1760 { 1761 return __iommu_group_for_each_dev(group, group->default_domain, 1762 iommu_group_do_dma_attach); 1763 } 1764 1765 static int iommu_group_do_probe_finalize(struct device *dev, void *data) 1766 { 1767 const struct iommu_ops *ops = dev_iommu_ops(dev); 1768 1769 if (ops->probe_finalize) 1770 ops->probe_finalize(dev); 1771 1772 return 0; 1773 } 1774 1775 static void __iommu_group_dma_finalize(struct iommu_group *group) 1776 { 1777 __iommu_group_for_each_dev(group, group->default_domain, 1778 iommu_group_do_probe_finalize); 1779 } 1780 1781 static int iommu_do_create_direct_mappings(struct device *dev, void *data) 1782 { 1783 struct iommu_group *group = data; 1784 1785 iommu_create_device_direct_mappings(group, dev); 1786 1787 return 0; 1788 } 1789 1790 static int iommu_group_create_direct_mappings(struct iommu_group *group) 1791 { 1792 return __iommu_group_for_each_dev(group, group, 1793 iommu_do_create_direct_mappings); 1794 } 1795 1796 int bus_iommu_probe(struct bus_type *bus) 1797 { 1798 struct iommu_group *group, *next; 
1799 LIST_HEAD(group_list); 1800 int ret; 1801 1802 /* 1803 * This code-path does not allocate the default domain when 1804 * creating the iommu group, so do it after the groups are 1805 * created. 1806 */ 1807 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1808 if (ret) 1809 return ret; 1810 1811 list_for_each_entry_safe(group, next, &group_list, entry) { 1812 /* Remove item from the list */ 1813 list_del_init(&group->entry); 1814 1815 mutex_lock(&group->mutex); 1816 1817 /* Try to allocate default domain */ 1818 probe_alloc_default_domain(bus, group); 1819 1820 if (!group->default_domain) { 1821 mutex_unlock(&group->mutex); 1822 continue; 1823 } 1824 1825 iommu_group_create_direct_mappings(group); 1826 1827 ret = __iommu_group_dma_attach(group); 1828 1829 mutex_unlock(&group->mutex); 1830 1831 if (ret) 1832 break; 1833 1834 __iommu_group_dma_finalize(group); 1835 } 1836 1837 return ret; 1838 } 1839 1840 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) 1841 { 1842 struct notifier_block *nb; 1843 int err; 1844 1845 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 1846 if (!nb) 1847 return -ENOMEM; 1848 1849 nb->notifier_call = iommu_bus_notifier; 1850 1851 err = bus_register_notifier(bus, nb); 1852 if (err) 1853 goto out_free; 1854 1855 err = bus_iommu_probe(bus); 1856 if (err) 1857 goto out_err; 1858 1859 1860 return 0; 1861 1862 out_err: 1863 /* Clean up */ 1864 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group); 1865 bus_unregister_notifier(bus, nb); 1866 1867 out_free: 1868 kfree(nb); 1869 1870 return err; 1871 } 1872 1873 /** 1874 * bus_set_iommu - set iommu-callbacks for the bus 1875 * @bus: bus. 1876 * @ops: the callbacks provided by the iommu-driver 1877 * 1878 * This function is called by an iommu driver to set the iommu methods 1879 * used for a particular bus. Drivers for devices on that bus can use 1880 * the iommu-api after these ops are registered. 1881 * This special function is needed because IOMMUs are usually devices on 1882 * the bus itself, so the iommu drivers are not initialized when the bus 1883 * is set up. With this function the iommu-driver can set the iommu-ops 1884 * afterwards. 1885 */ 1886 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) 1887 { 1888 int err; 1889 1890 if (ops == NULL) { 1891 bus->iommu_ops = NULL; 1892 return 0; 1893 } 1894 1895 if (bus->iommu_ops != NULL) 1896 return -EBUSY; 1897 1898 bus->iommu_ops = ops; 1899 1900 /* Do IOMMU specific setup for this bus-type */ 1901 err = iommu_bus_init(bus, ops); 1902 if (err) 1903 bus->iommu_ops = NULL; 1904 1905 return err; 1906 } 1907 EXPORT_SYMBOL_GPL(bus_set_iommu); 1908 1909 bool iommu_present(struct bus_type *bus) 1910 { 1911 return bus->iommu_ops != NULL; 1912 } 1913 EXPORT_SYMBOL_GPL(iommu_present); 1914 1915 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 1916 { 1917 if (!bus->iommu_ops || !bus->iommu_ops->capable) 1918 return false; 1919 1920 return bus->iommu_ops->capable(cap); 1921 } 1922 EXPORT_SYMBOL_GPL(iommu_capable); 1923 1924 /** 1925 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1926 * @domain: iommu domain 1927 * @handler: fault handler 1928 * @token: user data, will be passed back to the fault handler 1929 * 1930 * This function should be used by IOMMU users which want to be notified 1931 * whenever an IOMMU fault happens. 1932 * 1933 * The fault handler itself should return 0 on success, and an appropriate 1934 * error code otherwise. 
1935 */ 1936 void iommu_set_fault_handler(struct iommu_domain *domain, 1937 iommu_fault_handler_t handler, 1938 void *token) 1939 { 1940 BUG_ON(!domain); 1941 1942 domain->handler = handler; 1943 domain->handler_token = token; 1944 } 1945 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1946 1947 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 1948 unsigned type) 1949 { 1950 struct iommu_domain *domain; 1951 1952 if (bus == NULL || bus->iommu_ops == NULL) 1953 return NULL; 1954 1955 domain = bus->iommu_ops->domain_alloc(type); 1956 if (!domain) 1957 return NULL; 1958 1959 domain->type = type; 1960 /* Assume all sizes by default; the driver may override this later */ 1961 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 1962 if (!domain->ops) 1963 domain->ops = bus->iommu_ops->default_domain_ops; 1964 1965 if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { 1966 iommu_domain_free(domain); 1967 domain = NULL; 1968 } 1969 return domain; 1970 } 1971 1972 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 1973 { 1974 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 1975 } 1976 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 1977 1978 void iommu_domain_free(struct iommu_domain *domain) 1979 { 1980 iommu_put_dma_cookie(domain); 1981 domain->ops->free(domain); 1982 } 1983 EXPORT_SYMBOL_GPL(iommu_domain_free); 1984 1985 static int __iommu_attach_device(struct iommu_domain *domain, 1986 struct device *dev) 1987 { 1988 int ret; 1989 1990 if (unlikely(domain->ops->attach_dev == NULL)) 1991 return -ENODEV; 1992 1993 ret = domain->ops->attach_dev(domain, dev); 1994 if (!ret) 1995 trace_attach_device_to_domain(dev); 1996 return ret; 1997 } 1998 1999 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 2000 { 2001 struct iommu_group *group; 2002 int ret; 2003 2004 group = iommu_group_get(dev); 2005 if (!group) 2006 return -ENODEV; 2007 2008 /* 2009 * Lock the group to make sure the device-count doesn't 2010 * change while we are attaching 2011 */ 2012 mutex_lock(&group->mutex); 2013 ret = -EINVAL; 2014 if (iommu_group_device_count(group) != 1) 2015 goto out_unlock; 2016 2017 ret = __iommu_attach_group(domain, group); 2018 2019 out_unlock: 2020 mutex_unlock(&group->mutex); 2021 iommu_group_put(group); 2022 2023 return ret; 2024 } 2025 EXPORT_SYMBOL_GPL(iommu_attach_device); 2026 2027 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2028 { 2029 if (iommu_is_attach_deferred(dev)) 2030 return __iommu_attach_device(domain, dev); 2031 2032 return 0; 2033 } 2034 2035 static void __iommu_detach_device(struct iommu_domain *domain, 2036 struct device *dev) 2037 { 2038 if (iommu_is_attach_deferred(dev)) 2039 return; 2040 2041 if (unlikely(domain->ops->detach_dev == NULL)) 2042 return; 2043 2044 domain->ops->detach_dev(domain, dev); 2045 trace_detach_device_from_domain(dev); 2046 } 2047 2048 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2049 { 2050 struct iommu_group *group; 2051 2052 group = iommu_group_get(dev); 2053 if (!group) 2054 return; 2055 2056 mutex_lock(&group->mutex); 2057 if (iommu_group_device_count(group) != 1) { 2058 WARN_ON(1); 2059 goto out_unlock; 2060 } 2061 2062 __iommu_detach_group(domain, group); 2063 2064 out_unlock: 2065 mutex_unlock(&group->mutex); 2066 iommu_group_put(group); 2067 } 2068 EXPORT_SYMBOL_GPL(iommu_detach_device); 2069 2070 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2071 { 2072 struct iommu_domain *domain; 2073 struct iommu_group 
*group; 2074 2075 group = iommu_group_get(dev); 2076 if (!group) 2077 return NULL; 2078 2079 domain = group->domain; 2080 2081 iommu_group_put(group); 2082 2083 return domain; 2084 } 2085 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2086 2087 /* 2088 * For IOMMU_DOMAIN_DMA implementations which already provide their own 2089 * guarantees that the group and its default domain are valid and correct. 2090 */ 2091 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2092 { 2093 return dev->iommu_group->default_domain; 2094 } 2095 2096 /* 2097 * IOMMU groups are really the natural working unit of the IOMMU, but 2098 * the IOMMU API works on domains and devices. Bridge that gap by 2099 * iterating over the devices in a group. Ideally we'd have a single 2100 * device which represents the requestor ID of the group, but we also 2101 * allow IOMMU drivers to create policy defined minimum sets, where 2102 * the physical hardware may be able to distinguish members, but we 2103 * wish to group them at a higher level (ex. untrusted multi-function 2104 * PCI devices). Thus we attach each device. 2105 */ 2106 static int iommu_group_do_attach_device(struct device *dev, void *data) 2107 { 2108 struct iommu_domain *domain = data; 2109 2110 return __iommu_attach_device(domain, dev); 2111 } 2112 2113 static int __iommu_attach_group(struct iommu_domain *domain, 2114 struct iommu_group *group) 2115 { 2116 int ret; 2117 2118 if (group->domain && group->domain != group->default_domain) 2119 return -EBUSY; 2120 2121 ret = __iommu_group_for_each_dev(group, domain, 2122 iommu_group_do_attach_device); 2123 if (ret == 0) 2124 group->domain = domain; 2125 2126 return ret; 2127 } 2128 2129 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2130 { 2131 int ret; 2132 2133 mutex_lock(&group->mutex); 2134 ret = __iommu_attach_group(domain, group); 2135 mutex_unlock(&group->mutex); 2136 2137 return ret; 2138 } 2139 EXPORT_SYMBOL_GPL(iommu_attach_group); 2140 2141 static int iommu_group_do_detach_device(struct device *dev, void *data) 2142 { 2143 struct iommu_domain *domain = data; 2144 2145 __iommu_detach_device(domain, dev); 2146 2147 return 0; 2148 } 2149 2150 static void __iommu_detach_group(struct iommu_domain *domain, 2151 struct iommu_group *group) 2152 { 2153 int ret; 2154 2155 /* 2156 * If the group has been claimed already, do not re-attach the default 2157 * domain.
2158 */ 2159 if (!group->default_domain || group->owner) { 2160 __iommu_group_for_each_dev(group, domain, 2161 iommu_group_do_detach_device); 2162 group->domain = NULL; 2163 return; 2164 } 2165 2166 if (group->domain == group->default_domain) 2167 return; 2168 2169 /* Detach by re-attaching to the default domain */ 2170 ret = __iommu_group_for_each_dev(group, group->default_domain, 2171 iommu_group_do_attach_device); 2172 if (ret != 0) 2173 WARN_ON(1); 2174 else 2175 group->domain = group->default_domain; 2176 } 2177 2178 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2179 { 2180 mutex_lock(&group->mutex); 2181 __iommu_detach_group(domain, group); 2182 mutex_unlock(&group->mutex); 2183 } 2184 EXPORT_SYMBOL_GPL(iommu_detach_group); 2185 2186 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2187 { 2188 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2189 return iova; 2190 2191 if (domain->type == IOMMU_DOMAIN_BLOCKED) 2192 return 0; 2193 2194 return domain->ops->iova_to_phys(domain, iova); 2195 } 2196 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2197 2198 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, 2199 phys_addr_t paddr, size_t size, size_t *count) 2200 { 2201 unsigned int pgsize_idx, pgsize_idx_next; 2202 unsigned long pgsizes; 2203 size_t offset, pgsize, pgsize_next; 2204 unsigned long addr_merge = paddr | iova; 2205 2206 /* Page sizes supported by the hardware and small enough for @size */ 2207 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); 2208 2209 /* Constrain the page sizes further based on the maximum alignment */ 2210 if (likely(addr_merge)) 2211 pgsizes &= GENMASK(__ffs(addr_merge), 0); 2212 2213 /* Make sure we have at least one suitable page size */ 2214 BUG_ON(!pgsizes); 2215 2216 /* Pick the biggest page size remaining */ 2217 pgsize_idx = __fls(pgsizes); 2218 pgsize = BIT(pgsize_idx); 2219 if (!count) 2220 return pgsize; 2221 2222 /* Find the next biggest supported page size, if it exists */ 2223 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); 2224 if (!pgsizes) 2225 goto out_set_count; 2226 2227 pgsize_idx_next = __ffs(pgsizes); 2228 pgsize_next = BIT(pgsize_idx_next); 2229 2230 /* 2231 * There's no point trying a bigger page size unless the virtual 2232 * and physical addresses are similarly offset within the larger page. 2233 */ 2234 if ((iova ^ paddr) & (pgsize_next - 1)) 2235 goto out_set_count; 2236 2237 /* Calculate the offset to the next page size alignment boundary */ 2238 offset = pgsize_next - (addr_merge & (pgsize_next - 1)); 2239 2240 /* 2241 * If size is big enough to accommodate the larger page, reduce 2242 * the number of smaller pages. 2243 */ 2244 if (offset + pgsize_next <= size) 2245 size = offset; 2246 2247 out_set_count: 2248 *count = size >> pgsize_idx; 2249 return pgsize; 2250 } 2251 2252 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, 2253 phys_addr_t paddr, size_t size, int prot, 2254 gfp_t gfp, size_t *mapped) 2255 { 2256 const struct iommu_domain_ops *ops = domain->ops; 2257 size_t pgsize, count; 2258 int ret; 2259 2260 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); 2261 2262 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", 2263 iova, &paddr, pgsize, count); 2264 2265 if (ops->map_pages) { 2266 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, 2267 gfp, mapped); 2268 } else { 2269 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2270 *mapped = ret ?
0 : pgsize; 2271 } 2272 2273 return ret; 2274 } 2275 2276 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2277 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2278 { 2279 const struct iommu_domain_ops *ops = domain->ops; 2280 unsigned long orig_iova = iova; 2281 unsigned int min_pagesz; 2282 size_t orig_size = size; 2283 phys_addr_t orig_paddr = paddr; 2284 int ret = 0; 2285 2286 if (unlikely(!(ops->map || ops->map_pages) || 2287 domain->pgsize_bitmap == 0UL)) 2288 return -ENODEV; 2289 2290 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2291 return -EINVAL; 2292 2293 /* find out the minimum page size supported */ 2294 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2295 2296 /* 2297 * both the virtual address and the physical one, as well as 2298 * the size of the mapping, must be aligned (at least) to the 2299 * size of the smallest page supported by the hardware 2300 */ 2301 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2302 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2303 iova, &paddr, size, min_pagesz); 2304 return -EINVAL; 2305 } 2306 2307 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2308 2309 while (size) { 2310 size_t mapped = 0; 2311 2312 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, 2313 &mapped); 2314 /* 2315 * Some pages may have been mapped, even if an error occurred, 2316 * so we should account for those so they can be unmapped. 2317 */ 2318 size -= mapped; 2319 2320 if (ret) 2321 break; 2322 2323 iova += mapped; 2324 paddr += mapped; 2325 } 2326 2327 /* unroll mapping in case something went wrong */ 2328 if (ret) 2329 iommu_unmap(domain, orig_iova, orig_size - size); 2330 else 2331 trace_map(orig_iova, orig_paddr, orig_size); 2332 2333 return ret; 2334 } 2335 2336 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, 2337 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2338 { 2339 const struct iommu_domain_ops *ops = domain->ops; 2340 int ret; 2341 2342 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2343 if (ret == 0 && ops->iotlb_sync_map) 2344 ops->iotlb_sync_map(domain, iova, size); 2345 2346 return ret; 2347 } 2348 2349 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2350 phys_addr_t paddr, size_t size, int prot) 2351 { 2352 might_sleep(); 2353 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2354 } 2355 EXPORT_SYMBOL_GPL(iommu_map); 2356 2357 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2358 phys_addr_t paddr, size_t size, int prot) 2359 { 2360 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2361 } 2362 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2363 2364 static size_t __iommu_unmap_pages(struct iommu_domain *domain, 2365 unsigned long iova, size_t size, 2366 struct iommu_iotlb_gather *iotlb_gather) 2367 { 2368 const struct iommu_domain_ops *ops = domain->ops; 2369 size_t pgsize, count; 2370 2371 pgsize = iommu_pgsize(domain, iova, iova, size, &count); 2372 return ops->unmap_pages ? 
2373 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) : 2374 ops->unmap(domain, iova, pgsize, iotlb_gather); 2375 } 2376 2377 static size_t __iommu_unmap(struct iommu_domain *domain, 2378 unsigned long iova, size_t size, 2379 struct iommu_iotlb_gather *iotlb_gather) 2380 { 2381 const struct iommu_domain_ops *ops = domain->ops; 2382 size_t unmapped_page, unmapped = 0; 2383 unsigned long orig_iova = iova; 2384 unsigned int min_pagesz; 2385 2386 if (unlikely(!(ops->unmap || ops->unmap_pages) || 2387 domain->pgsize_bitmap == 0UL)) 2388 return 0; 2389 2390 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2391 return 0; 2392 2393 /* find out the minimum page size supported */ 2394 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2395 2396 /* 2397 * The virtual address, as well as the size of the mapping, must be 2398 * aligned (at least) to the size of the smallest page supported 2399 * by the hardware 2400 */ 2401 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2402 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2403 iova, size, min_pagesz); 2404 return 0; 2405 } 2406 2407 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2408 2409 /* 2410 * Keep iterating until we either unmap 'size' bytes (or more) 2411 * or we hit an area that isn't mapped. 2412 */ 2413 while (unmapped < size) { 2414 unmapped_page = __iommu_unmap_pages(domain, iova, 2415 size - unmapped, 2416 iotlb_gather); 2417 if (!unmapped_page) 2418 break; 2419 2420 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2421 iova, unmapped_page); 2422 2423 iova += unmapped_page; 2424 unmapped += unmapped_page; 2425 } 2426 2427 trace_unmap(orig_iova, size, unmapped); 2428 return unmapped; 2429 } 2430 2431 size_t iommu_unmap(struct iommu_domain *domain, 2432 unsigned long iova, size_t size) 2433 { 2434 struct iommu_iotlb_gather iotlb_gather; 2435 size_t ret; 2436 2437 iommu_iotlb_gather_init(&iotlb_gather); 2438 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2439 iommu_iotlb_sync(domain, &iotlb_gather); 2440 2441 return ret; 2442 } 2443 EXPORT_SYMBOL_GPL(iommu_unmap); 2444 2445 size_t iommu_unmap_fast(struct iommu_domain *domain, 2446 unsigned long iova, size_t size, 2447 struct iommu_iotlb_gather *iotlb_gather) 2448 { 2449 return __iommu_unmap(domain, iova, size, iotlb_gather); 2450 } 2451 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2452 2453 static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2454 struct scatterlist *sg, unsigned int nents, int prot, 2455 gfp_t gfp) 2456 { 2457 const struct iommu_domain_ops *ops = domain->ops; 2458 size_t len = 0, mapped = 0; 2459 phys_addr_t start; 2460 unsigned int i = 0; 2461 int ret; 2462 2463 while (i <= nents) { 2464 phys_addr_t s_phys = sg_phys(sg); 2465 2466 if (len && s_phys != start + len) { 2467 ret = __iommu_map(domain, iova + mapped, start, 2468 len, prot, gfp); 2469 2470 if (ret) 2471 goto out_err; 2472 2473 mapped += len; 2474 len = 0; 2475 } 2476 2477 if (len) { 2478 len += sg->length; 2479 } else { 2480 len = sg->length; 2481 start = s_phys; 2482 } 2483 2484 if (++i < nents) 2485 sg = sg_next(sg); 2486 } 2487 2488 if (ops->iotlb_sync_map) 2489 ops->iotlb_sync_map(domain, iova, mapped); 2490 return mapped; 2491 2492 out_err: 2493 /* undo mappings already done */ 2494 iommu_unmap(domain, iova, mapped); 2495 2496 return ret; 2497 } 2498 2499 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2500 struct scatterlist *sg, unsigned int nents, int prot) 2501 { 2502 might_sleep(); 2503 return __iommu_map_sg(domain, iova, 
sg, nents, prot, GFP_KERNEL); 2504 } 2505 EXPORT_SYMBOL_GPL(iommu_map_sg); 2506 2507 ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2508 struct scatterlist *sg, unsigned int nents, int prot) 2509 { 2510 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); 2511 } 2512 2513 /** 2514 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2515 * @domain: the iommu domain where the fault has happened 2516 * @dev: the device where the fault has happened 2517 * @iova: the faulting address 2518 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2519 * 2520 * This function should be called by the low-level IOMMU implementations 2521 * whenever IOMMU faults happen, to allow high-level users, that are 2522 * interested in such events, to know about them. 2523 * 2524 * This event may be useful for several possible use cases: 2525 * - mere logging of the event 2526 * - dynamic TLB/PTE loading 2527 * - if restarting of the faulting device is required 2528 * 2529 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2530 * PTE/TLB loading will one day be supported, implementations will be able 2531 * to tell whether it succeeded or not according to this return value). 2532 * 2533 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2534 * (though fault handlers can also return -ENOSYS, in case they want to 2535 * elicit the default behavior of the IOMMU drivers). 2536 */ 2537 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2538 unsigned long iova, int flags) 2539 { 2540 int ret = -ENOSYS; 2541 2542 /* 2543 * if upper layers showed interest and installed a fault handler, 2544 * invoke it. 2545 */ 2546 if (domain->handler) 2547 ret = domain->handler(domain, dev, iova, flags, 2548 domain->handler_token); 2549 2550 trace_io_page_fault(dev, iova, flags); 2551 return ret; 2552 } 2553 EXPORT_SYMBOL_GPL(report_iommu_fault); 2554 2555 static int __init iommu_init(void) 2556 { 2557 iommu_group_kset = kset_create_and_add("iommu_groups", 2558 NULL, kernel_kobj); 2559 BUG_ON(!iommu_group_kset); 2560 2561 iommu_debugfs_setup(); 2562 2563 return 0; 2564 } 2565 core_initcall(iommu_init); 2566 2567 int iommu_enable_nesting(struct iommu_domain *domain) 2568 { 2569 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2570 return -EINVAL; 2571 if (!domain->ops->enable_nesting) 2572 return -EINVAL; 2573 return domain->ops->enable_nesting(domain); 2574 } 2575 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2576 2577 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2578 unsigned long quirk) 2579 { 2580 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2581 return -EINVAL; 2582 if (!domain->ops->set_pgtable_quirks) 2583 return -EINVAL; 2584 return domain->ops->set_pgtable_quirks(domain, quirk); 2585 } 2586 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2587 2588 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2589 { 2590 const struct iommu_ops *ops = dev_iommu_ops(dev); 2591 2592 if (ops->get_resv_regions) 2593 ops->get_resv_regions(dev, list); 2594 } 2595 2596 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2597 { 2598 const struct iommu_ops *ops = dev_iommu_ops(dev); 2599 2600 if (ops->put_resv_regions) 2601 ops->put_resv_regions(dev, list); 2602 } 2603 2604 /** 2605 * generic_iommu_put_resv_regions - Reserved region driver helper 2606 * @dev: device for which to free reserved regions 2607 * @list: reserved region list for device 2608 * 2609 * 
IOMMU drivers can use this to implement their .put_resv_regions() callback 2610 * for simple reservations. Memory allocated for each reserved region will be 2611 * freed. If an IOMMU driver allocates additional resources per region, it is 2612 * going to have to implement a custom callback. 2613 */ 2614 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) 2615 { 2616 struct iommu_resv_region *entry, *next; 2617 2618 list_for_each_entry_safe(entry, next, list, list) 2619 kfree(entry); 2620 } 2621 EXPORT_SYMBOL(generic_iommu_put_resv_regions); 2622 2623 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2624 size_t length, int prot, 2625 enum iommu_resv_type type) 2626 { 2627 struct iommu_resv_region *region; 2628 2629 region = kzalloc(sizeof(*region), GFP_KERNEL); 2630 if (!region) 2631 return NULL; 2632 2633 INIT_LIST_HEAD(®ion->list); 2634 region->start = start; 2635 region->length = length; 2636 region->prot = prot; 2637 region->type = type; 2638 return region; 2639 } 2640 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2641 2642 void iommu_set_default_passthrough(bool cmd_line) 2643 { 2644 if (cmd_line) 2645 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2646 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2647 } 2648 2649 void iommu_set_default_translated(bool cmd_line) 2650 { 2651 if (cmd_line) 2652 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2653 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2654 } 2655 2656 bool iommu_default_passthrough(void) 2657 { 2658 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2659 } 2660 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2661 2662 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2663 { 2664 const struct iommu_ops *ops = NULL; 2665 struct iommu_device *iommu; 2666 2667 spin_lock(&iommu_device_lock); 2668 list_for_each_entry(iommu, &iommu_device_list, list) 2669 if (iommu->fwnode == fwnode) { 2670 ops = iommu->ops; 2671 break; 2672 } 2673 spin_unlock(&iommu_device_lock); 2674 return ops; 2675 } 2676 2677 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2678 const struct iommu_ops *ops) 2679 { 2680 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2681 2682 if (fwspec) 2683 return ops == fwspec->ops ? 
0 : -EINVAL; 2684 2685 if (!dev_iommu_get(dev)) 2686 return -ENOMEM; 2687 2688 /* Preallocate for the overwhelmingly common case of 1 ID */ 2689 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2690 if (!fwspec) 2691 return -ENOMEM; 2692 2693 of_node_get(to_of_node(iommu_fwnode)); 2694 fwspec->iommu_fwnode = iommu_fwnode; 2695 fwspec->ops = ops; 2696 dev_iommu_fwspec_set(dev, fwspec); 2697 return 0; 2698 } 2699 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2700 2701 void iommu_fwspec_free(struct device *dev) 2702 { 2703 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2704 2705 if (fwspec) { 2706 fwnode_handle_put(fwspec->iommu_fwnode); 2707 kfree(fwspec); 2708 dev_iommu_fwspec_set(dev, NULL); 2709 } 2710 } 2711 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2712 2713 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2714 { 2715 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2716 int i, new_num; 2717 2718 if (!fwspec) 2719 return -EINVAL; 2720 2721 new_num = fwspec->num_ids + num_ids; 2722 if (new_num > 1) { 2723 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2724 GFP_KERNEL); 2725 if (!fwspec) 2726 return -ENOMEM; 2727 2728 dev_iommu_fwspec_set(dev, fwspec); 2729 } 2730 2731 for (i = 0; i < num_ids; i++) 2732 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2733 2734 fwspec->num_ids = new_num; 2735 return 0; 2736 } 2737 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2738 2739 /* 2740 * Per device IOMMU features. 2741 */ 2742 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2743 { 2744 if (dev->iommu && dev->iommu->iommu_dev) { 2745 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2746 2747 if (ops->dev_enable_feat) 2748 return ops->dev_enable_feat(dev, feat); 2749 } 2750 2751 return -ENODEV; 2752 } 2753 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2754 2755 /* 2756 * The device drivers should do the necessary cleanups before calling this. 2757 */ 2758 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2759 { 2760 if (dev->iommu && dev->iommu->iommu_dev) { 2761 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2762 2763 if (ops->dev_disable_feat) 2764 return ops->dev_disable_feat(dev, feat); 2765 } 2766 2767 return -EBUSY; 2768 } 2769 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2770 2771 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 2772 { 2773 if (dev->iommu && dev->iommu->iommu_dev) { 2774 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2775 2776 if (ops->dev_feat_enabled) 2777 return ops->dev_feat_enabled(dev, feat); 2778 } 2779 2780 return false; 2781 } 2782 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); 2783 2784 /** 2785 * iommu_sva_bind_device() - Bind a process address space to a device 2786 * @dev: the device 2787 * @mm: the mm to bind, caller must hold a reference to it 2788 * @drvdata: opaque data pointer to pass to bind callback 2789 * 2790 * Create a bond between device and address space, allowing the device to access 2791 * the mm using the returned PASID. If a bond already exists between @device and 2792 * @mm, it is returned and an additional reference is taken. Caller must call 2793 * iommu_sva_unbind_device() to release each reference. 2794 * 2795 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to 2796 * initialize the required SVA features. 2797 * 2798 * On error, returns an ERR_PTR value. 
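 *
 * Minimal usage sketch (illustrative only; error paths and the mm reference
 * handling required above are trimmed):
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		return -ENODEV;
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	// program the device to tag its DMA with pasid, then later:
 *	iommu_sva_unbind_device(handle);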
2799 */ 2800 struct iommu_sva * 2801 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 2802 { 2803 struct iommu_group *group; 2804 struct iommu_sva *handle = ERR_PTR(-EINVAL); 2805 const struct iommu_ops *ops = dev_iommu_ops(dev); 2806 2807 if (!ops->sva_bind) 2808 return ERR_PTR(-ENODEV); 2809 2810 group = iommu_group_get(dev); 2811 if (!group) 2812 return ERR_PTR(-ENODEV); 2813 2814 /* Ensure device count and domain don't change while we're binding */ 2815 mutex_lock(&group->mutex); 2816 2817 /* 2818 * To keep things simple, SVA currently doesn't support IOMMU groups 2819 * with more than one device. Existing SVA-capable systems are not 2820 * affected by the problems that required IOMMU groups (lack of ACS 2821 * isolation, device ID aliasing and other hardware issues). 2822 */ 2823 if (iommu_group_device_count(group) != 1) 2824 goto out_unlock; 2825 2826 handle = ops->sva_bind(dev, mm, drvdata); 2827 2828 out_unlock: 2829 mutex_unlock(&group->mutex); 2830 iommu_group_put(group); 2831 2832 return handle; 2833 } 2834 EXPORT_SYMBOL_GPL(iommu_sva_bind_device); 2835 2836 /** 2837 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device 2838 * @handle: the handle returned by iommu_sva_bind_device() 2839 * 2840 * Put reference to a bond between device and address space. The device should 2841 * not be issuing any more transaction for this PASID. All outstanding page 2842 * requests for this PASID must have been flushed to the IOMMU. 2843 */ 2844 void iommu_sva_unbind_device(struct iommu_sva *handle) 2845 { 2846 struct iommu_group *group; 2847 struct device *dev = handle->dev; 2848 const struct iommu_ops *ops = dev_iommu_ops(dev); 2849 2850 if (!ops->sva_unbind) 2851 return; 2852 2853 group = iommu_group_get(dev); 2854 if (!group) 2855 return; 2856 2857 mutex_lock(&group->mutex); 2858 ops->sva_unbind(handle); 2859 mutex_unlock(&group->mutex); 2860 2861 iommu_group_put(group); 2862 } 2863 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); 2864 2865 u32 iommu_sva_get_pasid(struct iommu_sva *handle) 2866 { 2867 const struct iommu_ops *ops = dev_iommu_ops(handle->dev); 2868 2869 if (!ops->sva_get_pasid) 2870 return IOMMU_PASID_INVALID; 2871 2872 return ops->sva_get_pasid(handle); 2873 } 2874 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); 2875 2876 /* 2877 * Changes the default domain of an iommu group that has *only* one device 2878 * 2879 * @group: The group for which the default domain should be changed 2880 * @prev_dev: The device in the group (this is used to make sure that the device 2881 * hasn't changed after the caller has called this function) 2882 * @type: The type of the new default domain that gets associated with the group 2883 * 2884 * Returns 0 on success and error code on failure 2885 * 2886 * Note: 2887 * 1. Presently, this function is called only when user requests to change the 2888 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 2889 * Please take a closer look if intended to use for other purposes. 
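 *    For example, from user space (illustrative; the group id is arbitrary):
 *        # echo DMA-FQ > /sys/kernel/iommu_groups/0/type
 *    The accepted strings are "identity", "DMA", "DMA-FQ" and "auto", as
 *    parsed by iommu_group_store_type() below.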
2890 */ 2891 static int iommu_change_dev_def_domain(struct iommu_group *group, 2892 struct device *prev_dev, int type) 2893 { 2894 struct iommu_domain *prev_dom; 2895 struct group_device *grp_dev; 2896 int ret, dev_def_dom; 2897 struct device *dev; 2898 2899 mutex_lock(&group->mutex); 2900 2901 if (group->default_domain != group->domain) { 2902 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 2903 ret = -EBUSY; 2904 goto out; 2905 } 2906 2907 /* 2908 * iommu group wasn't locked while acquiring device lock in 2909 * iommu_group_store_type(). So, make sure that the device count hasn't 2910 * changed while acquiring device lock. 2911 * 2912 * Changing default domain of an iommu group with two or more devices 2913 * isn't supported because there could be a potential deadlock. Consider 2914 * the following scenario. T1 is trying to acquire device locks of all 2915 * the devices in the group and before it could acquire all of them, 2916 * there could be another thread T2 (from different sub-system and use 2917 * case) that has already acquired some of the device locks and might be 2918 * waiting for T1 to release other device locks. 2919 */ 2920 if (iommu_group_device_count(group) != 1) { 2921 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); 2922 ret = -EINVAL; 2923 goto out; 2924 } 2925 2926 /* Since group has only one device */ 2927 grp_dev = list_first_entry(&group->devices, struct group_device, list); 2928 dev = grp_dev->dev; 2929 2930 if (prev_dev != dev) { 2931 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); 2932 ret = -EBUSY; 2933 goto out; 2934 } 2935 2936 prev_dom = group->default_domain; 2937 if (!prev_dom) { 2938 ret = -EINVAL; 2939 goto out; 2940 } 2941 2942 dev_def_dom = iommu_get_def_domain_type(dev); 2943 if (!type) { 2944 /* 2945 * If the user hasn't requested any specific type of domain and 2946 * if the device supports both the domains, then default to the 2947 * domain the device was booted with 2948 */ 2949 type = dev_def_dom ? : iommu_def_domain_type; 2950 } else if (dev_def_dom && type != dev_def_dom) { 2951 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", 2952 iommu_domain_type_str(type)); 2953 ret = -EINVAL; 2954 goto out; 2955 } 2956 2957 /* 2958 * Switch to a new domain only if the requested domain type is different 2959 * from the existing default domain type 2960 */ 2961 if (prev_dom->type == type) { 2962 ret = 0; 2963 goto out; 2964 } 2965 2966 /* We can bring up a flush queue without tearing down the domain */ 2967 if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) { 2968 ret = iommu_dma_init_fq(prev_dom); 2969 if (!ret) 2970 prev_dom->type = IOMMU_DOMAIN_DMA_FQ; 2971 goto out; 2972 } 2973 2974 /* Sets group->default_domain to the newly allocated domain */ 2975 ret = iommu_group_alloc_default_domain(dev->bus, group, type); 2976 if (ret) 2977 goto out; 2978 2979 ret = iommu_create_device_direct_mappings(group, dev); 2980 if (ret) 2981 goto free_new_domain; 2982 2983 ret = __iommu_attach_device(group->default_domain, dev); 2984 if (ret) 2985 goto free_new_domain; 2986 2987 group->domain = group->default_domain; 2988 2989 /* 2990 * Release the mutex here because ops->probe_finalize() call-back of 2991 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 2992 * in-turn might call back into IOMMU core code, where it tries to take 2993 * group->mutex, resulting in a deadlock. 
2994 */ 2995 mutex_unlock(&group->mutex); 2996 2997 /* Make sure dma_ops is appropriately set */ 2998 iommu_group_do_probe_finalize(dev, group->default_domain); 2999 iommu_domain_free(prev_dom); 3000 return 0; 3001 3002 free_new_domain: 3003 iommu_domain_free(group->default_domain); 3004 group->default_domain = prev_dom; 3005 group->domain = prev_dom; 3006 3007 out: 3008 mutex_unlock(&group->mutex); 3009 3010 return ret; 3011 } 3012 3013 /* 3014 * Changing the default domain through sysfs requires the users to unbind the 3015 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ 3016 * transition. Return failure if this isn't met. 3017 * 3018 * We need to consider the race between this and the device release path. 3019 * device_lock(dev) is used here to guarantee that the device release path 3020 * will not be entered at the same time. 3021 */ 3022 static ssize_t iommu_group_store_type(struct iommu_group *group, 3023 const char *buf, size_t count) 3024 { 3025 struct group_device *grp_dev; 3026 struct device *dev; 3027 int ret, req_type; 3028 3029 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3030 return -EACCES; 3031 3032 if (WARN_ON(!group)) 3033 return -EINVAL; 3034 3035 if (sysfs_streq(buf, "identity")) 3036 req_type = IOMMU_DOMAIN_IDENTITY; 3037 else if (sysfs_streq(buf, "DMA")) 3038 req_type = IOMMU_DOMAIN_DMA; 3039 else if (sysfs_streq(buf, "DMA-FQ")) 3040 req_type = IOMMU_DOMAIN_DMA_FQ; 3041 else if (sysfs_streq(buf, "auto")) 3042 req_type = 0; 3043 else 3044 return -EINVAL; 3045 3046 /* 3047 * Lock/Unlock the group mutex here before device lock to 3048 * 1. Make sure that the iommu group has only one device (this is a 3049 * prerequisite for step 2) 3050 * 2. Get struct *dev which is needed to lock device 3051 */ 3052 mutex_lock(&group->mutex); 3053 if (iommu_group_device_count(group) != 1) { 3054 mutex_unlock(&group->mutex); 3055 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 3056 return -EINVAL; 3057 } 3058 3059 /* Since group has only one device */ 3060 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3061 dev = grp_dev->dev; 3062 get_device(dev); 3063 3064 /* 3065 * Don't hold the group mutex because taking group mutex first and then 3066 * the device lock could potentially cause a deadlock as below. Assume 3067 * two threads T1 and T2. T1 is trying to change default domain of an 3068 * iommu group and T2 is trying to hot unplug a device or release [1] VF 3069 * of a PCIe device which is in the same iommu group. T1 takes group 3070 * mutex and before it could take device lock assume T2 has taken device 3071 * lock and is yet to take group mutex. Now, both the threads will be 3072 * waiting for the other thread to release lock. Hence the lock order 3073 * below is used.
3074 * device_lock(dev); 3075 * mutex_lock(&group->mutex); 3076 * iommu_change_dev_def_domain(); 3077 * mutex_unlock(&group->mutex); 3078 * device_unlock(dev); 3079 * 3080 * [1] Typical device release path 3081 * device_lock() from device/driver core code 3082 * -> bus_notifier() 3083 * -> iommu_bus_notifier() 3084 * -> iommu_release_device() 3085 * -> ops->release_device() vendor driver calls back iommu core code 3086 * -> mutex_lock() from iommu core code 3087 */ 3088 mutex_unlock(&group->mutex); 3089 3090 /* Check if the device in the group still has a driver bound to it */ 3091 device_lock(dev); 3092 if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ && 3093 group->default_domain->type == IOMMU_DOMAIN_DMA)) { 3094 pr_err_ratelimited("Device is still bound to driver\n"); 3095 ret = -EBUSY; 3096 goto out; 3097 } 3098 3099 ret = iommu_change_dev_def_domain(group, dev, req_type); 3100 ret = ret ?: count; 3101 3102 out: 3103 device_unlock(dev); 3104 put_device(dev); 3105 3106 return ret; 3107 } 3108 3109 /** 3110 * iommu_device_use_default_domain() - Device driver wants to handle device 3111 * DMA through the kernel DMA API. 3112 * @dev: The device. 3113 * 3114 * The device driver about to bind @dev wants to do DMA through the kernel 3115 * DMA API. Return 0 if it is allowed, otherwise an error. 3116 */ 3117 int iommu_device_use_default_domain(struct device *dev) 3118 { 3119 struct iommu_group *group = iommu_group_get(dev); 3120 int ret = 0; 3121 3122 if (!group) 3123 return 0; 3124 3125 mutex_lock(&group->mutex); 3126 if (group->owner_cnt) { 3127 if (group->domain != group->default_domain || 3128 group->owner) { 3129 ret = -EBUSY; 3130 goto unlock_out; 3131 } 3132 } 3133 3134 group->owner_cnt++; 3135 3136 unlock_out: 3137 mutex_unlock(&group->mutex); 3138 iommu_group_put(group); 3139 3140 return ret; 3141 } 3142 3143 /** 3144 * iommu_device_unuse_default_domain() - Device driver stops handling device 3145 * DMA through the kernel DMA API. 3146 * @dev: The device. 3147 * 3148 * The device driver doesn't want to do DMA through kernel DMA API anymore. 3149 * It must be called after iommu_device_use_default_domain(). 3150 */ 3151 void iommu_device_unuse_default_domain(struct device *dev) 3152 { 3153 struct iommu_group *group = iommu_group_get(dev); 3154 3155 if (!group) 3156 return; 3157 3158 mutex_lock(&group->mutex); 3159 if (!WARN_ON(!group->owner_cnt)) 3160 group->owner_cnt--; 3161 3162 mutex_unlock(&group->mutex); 3163 iommu_group_put(group); 3164 } 3165 3166 /** 3167 * iommu_group_claim_dma_owner() - Set DMA ownership of a group 3168 * @group: The group. 3169 * @owner: Caller specified pointer. Used for exclusive ownership. 3170 * 3171 * This is to support backward compatibility for vfio which manages 3172 * the dma ownership in iommu_group level. New invocations on this 3173 * interface should be prohibited. 
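 *
 * Illustrative call pattern (sketch only; the owner cookie "my_owner" is a
 * hypothetical caller-private pointer):
 *
 *	ret = iommu_group_claim_dma_owner(group, my_owner);
 *	if (ret)
 *		return ret;
 *	// the group is now detached from its default domain; attach an
 *	// UNMANAGED domain with iommu_attach_group() and run user DMA
 *	iommu_group_release_dma_owner(group);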
3174 */ 3175 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 3176 { 3177 int ret = 0; 3178 3179 mutex_lock(&group->mutex); 3180 if (group->owner_cnt) { 3181 ret = -EPERM; 3182 goto unlock_out; 3183 } else { 3184 if (group->domain && group->domain != group->default_domain) { 3185 ret = -EBUSY; 3186 goto unlock_out; 3187 } 3188 3189 group->owner = owner; 3190 if (group->domain) 3191 __iommu_detach_group(group->domain, group); 3192 } 3193 3194 group->owner_cnt++; 3195 unlock_out: 3196 mutex_unlock(&group->mutex); 3197 3198 return ret; 3199 } 3200 EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); 3201 3202 /** 3203 * iommu_group_release_dma_owner() - Release DMA ownership of a group 3204 * @group: The group. 3205 * 3206 * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). 3207 */ 3208 void iommu_group_release_dma_owner(struct iommu_group *group) 3209 { 3210 mutex_lock(&group->mutex); 3211 if (WARN_ON(!group->owner_cnt || !group->owner)) 3212 goto unlock_out; 3213 3214 group->owner_cnt = 0; 3215 /* 3216 * The UNMANAGED domain should be detached before all USER 3217 * owners have been released. 3218 */ 3219 if (!WARN_ON(group->domain) && group->default_domain) 3220 __iommu_attach_group(group->default_domain, group); 3221 group->owner = NULL; 3222 unlock_out: 3223 mutex_unlock(&group->mutex); 3224 } 3225 EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); 3226 3227 /** 3228 * iommu_group_dma_owner_claimed() - Query group dma ownership status 3229 * @group: The group. 3230 * 3231 * This provides status query on a given group. It is racy and only for 3232 * non-binding status reporting. 3233 */ 3234 bool iommu_group_dma_owner_claimed(struct iommu_group *group) 3235 { 3236 unsigned int user; 3237 3238 mutex_lock(&group->mutex); 3239 user = group->owner_cnt; 3240 mutex_unlock(&group->mutex); 3241 3242 return user; 3243 } 3244 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); 3245