1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/device.h> 10 #include <linux/kernel.h> 11 #include <linux/bug.h> 12 #include <linux/types.h> 13 #include <linux/init.h> 14 #include <linux/export.h> 15 #include <linux/slab.h> 16 #include <linux/errno.h> 17 #include <linux/iommu.h> 18 #include <linux/idr.h> 19 #include <linux/notifier.h> 20 #include <linux/err.h> 21 #include <linux/pci.h> 22 #include <linux/bitops.h> 23 #include <linux/property.h> 24 #include <linux/fsl/mc.h> 25 #include <linux/module.h> 26 #include <trace/events/iommu.h> 27 28 static struct kset *iommu_group_kset; 29 static DEFINE_IDA(iommu_group_ida); 30 31 static unsigned int iommu_def_domain_type __read_mostly; 32 static bool iommu_dma_strict __read_mostly = true; 33 static u32 iommu_cmd_line __read_mostly; 34 35 struct iommu_group { 36 struct kobject kobj; 37 struct kobject *devices_kobj; 38 struct list_head devices; 39 struct mutex mutex; 40 struct blocking_notifier_head notifier; 41 void *iommu_data; 42 void (*iommu_data_release)(void *iommu_data); 43 char *name; 44 int id; 45 struct iommu_domain *default_domain; 46 struct iommu_domain *domain; 47 struct list_head entry; 48 }; 49 50 struct group_device { 51 struct list_head list; 52 struct device *dev; 53 char *name; 54 }; 55 56 struct iommu_group_attribute { 57 struct attribute attr; 58 ssize_t (*show)(struct iommu_group *group, char *buf); 59 ssize_t (*store)(struct iommu_group *group, 60 const char *buf, size_t count); 61 }; 62 63 static const char * const iommu_group_resv_type_string[] = { 64 [IOMMU_RESV_DIRECT] = "direct", 65 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 66 [IOMMU_RESV_RESERVED] = "reserved", 67 [IOMMU_RESV_MSI] = "msi", 68 [IOMMU_RESV_SW_MSI] = "msi", 69 }; 70 71 #define IOMMU_CMD_LINE_DMA_API BIT(0) 72 73 static int iommu_alloc_default_domain(struct iommu_group *group, 74 struct device *dev); 75 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 76 unsigned type); 77 static int __iommu_attach_device(struct iommu_domain *domain, 78 struct device *dev); 79 static int __iommu_attach_group(struct iommu_domain *domain, 80 struct iommu_group *group); 81 static void __iommu_detach_group(struct iommu_domain *domain, 82 struct iommu_group *group); 83 static int iommu_create_device_direct_mappings(struct iommu_group *group, 84 struct device *dev); 85 static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 86 static ssize_t iommu_group_store_type(struct iommu_group *group, 87 const char *buf, size_t count); 88 89 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 90 struct iommu_group_attribute iommu_group_attr_##_name = \ 91 __ATTR(_name, _mode, _show, _store) 92 93 #define to_iommu_group_attr(_attr) \ 94 container_of(_attr, struct iommu_group_attribute, attr) 95 #define to_iommu_group(_kobj) \ 96 container_of(_kobj, struct iommu_group, kobj) 97 98 static LIST_HEAD(iommu_device_list); 99 static DEFINE_SPINLOCK(iommu_device_lock); 100 101 /* 102 * Use a function instead of an array here because the domain-type is a 103 * bit-field, so an array would waste memory. 
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	iommu_fwspec_free(dev);
	kfree(dev->iommu);
	dev->iommu = NULL;
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
There are still some drivers which don't 255 * support default domains, so the return value is not yet 256 * checked. 257 */ 258 iommu_alloc_default_domain(group, dev); 259 260 if (group->default_domain) { 261 ret = __iommu_attach_device(group->default_domain, dev); 262 if (ret) { 263 iommu_group_put(group); 264 goto err_release; 265 } 266 } 267 268 iommu_create_device_direct_mappings(group, dev); 269 270 iommu_group_put(group); 271 272 if (ops->probe_finalize) 273 ops->probe_finalize(dev); 274 275 return 0; 276 277 err_release: 278 iommu_release_device(dev); 279 280 err_out: 281 return ret; 282 283 } 284 285 void iommu_release_device(struct device *dev) 286 { 287 const struct iommu_ops *ops = dev->bus->iommu_ops; 288 289 if (!dev->iommu) 290 return; 291 292 iommu_device_unlink(dev->iommu->iommu_dev, dev); 293 294 ops->release_device(dev); 295 296 iommu_group_remove_device(dev); 297 module_put(ops->owner); 298 dev_iommu_free(dev); 299 } 300 301 static int __init iommu_set_def_domain_type(char *str) 302 { 303 bool pt; 304 int ret; 305 306 ret = kstrtobool(str, &pt); 307 if (ret) 308 return ret; 309 310 if (pt) 311 iommu_set_default_passthrough(true); 312 else 313 iommu_set_default_translated(true); 314 315 return 0; 316 } 317 early_param("iommu.passthrough", iommu_set_def_domain_type); 318 319 static int __init iommu_dma_setup(char *str) 320 { 321 return kstrtobool(str, &iommu_dma_strict); 322 } 323 early_param("iommu.strict", iommu_dma_setup); 324 325 static ssize_t iommu_group_attr_show(struct kobject *kobj, 326 struct attribute *__attr, char *buf) 327 { 328 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 329 struct iommu_group *group = to_iommu_group(kobj); 330 ssize_t ret = -EIO; 331 332 if (attr->show) 333 ret = attr->show(group, buf); 334 return ret; 335 } 336 337 static ssize_t iommu_group_attr_store(struct kobject *kobj, 338 struct attribute *__attr, 339 const char *buf, size_t count) 340 { 341 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 342 struct iommu_group *group = to_iommu_group(kobj); 343 ssize_t ret = -EIO; 344 345 if (attr->store) 346 ret = attr->store(group, buf, count); 347 return ret; 348 } 349 350 static const struct sysfs_ops iommu_group_sysfs_ops = { 351 .show = iommu_group_attr_show, 352 .store = iommu_group_attr_store, 353 }; 354 355 static int iommu_group_create_file(struct iommu_group *group, 356 struct iommu_group_attribute *attr) 357 { 358 return sysfs_create_file(&group->kobj, &attr->attr); 359 } 360 361 static void iommu_group_remove_file(struct iommu_group *group, 362 struct iommu_group_attribute *attr) 363 { 364 sysfs_remove_file(&group->kobj, &attr->attr); 365 } 366 367 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 368 { 369 return sprintf(buf, "%s\n", group->name); 370 } 371 372 /** 373 * iommu_insert_resv_region - Insert a new region in the 374 * list of reserved regions. 375 * @new: new region to insert 376 * @regions: list of regions 377 * 378 * Elements are sorted by start address and overlapping segments 379 * of the same type are merged. 
380 */ 381 static int iommu_insert_resv_region(struct iommu_resv_region *new, 382 struct list_head *regions) 383 { 384 struct iommu_resv_region *iter, *tmp, *nr, *top; 385 LIST_HEAD(stack); 386 387 nr = iommu_alloc_resv_region(new->start, new->length, 388 new->prot, new->type); 389 if (!nr) 390 return -ENOMEM; 391 392 /* First add the new element based on start address sorting */ 393 list_for_each_entry(iter, regions, list) { 394 if (nr->start < iter->start || 395 (nr->start == iter->start && nr->type <= iter->type)) 396 break; 397 } 398 list_add_tail(&nr->list, &iter->list); 399 400 /* Merge overlapping segments of type nr->type in @regions, if any */ 401 list_for_each_entry_safe(iter, tmp, regions, list) { 402 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 403 404 /* no merge needed on elements of different types than @new */ 405 if (iter->type != new->type) { 406 list_move_tail(&iter->list, &stack); 407 continue; 408 } 409 410 /* look for the last stack element of same type as @iter */ 411 list_for_each_entry_reverse(top, &stack, list) 412 if (top->type == iter->type) 413 goto check_overlap; 414 415 list_move_tail(&iter->list, &stack); 416 continue; 417 418 check_overlap: 419 top_end = top->start + top->length - 1; 420 421 if (iter->start > top_end + 1) { 422 list_move_tail(&iter->list, &stack); 423 } else { 424 top->length = max(top_end, iter_end) - top->start + 1; 425 list_del(&iter->list); 426 kfree(iter); 427 } 428 } 429 list_splice(&stack, regions); 430 return 0; 431 } 432 433 static int 434 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 435 struct list_head *group_resv_regions) 436 { 437 struct iommu_resv_region *entry; 438 int ret = 0; 439 440 list_for_each_entry(entry, dev_resv_regions, list) { 441 ret = iommu_insert_resv_region(entry, group_resv_regions); 442 if (ret) 443 break; 444 } 445 return ret; 446 } 447 448 int iommu_get_group_resv_regions(struct iommu_group *group, 449 struct list_head *head) 450 { 451 struct group_device *device; 452 int ret = 0; 453 454 mutex_lock(&group->mutex); 455 list_for_each_entry(device, &group->devices, list) { 456 struct list_head dev_resv_regions; 457 458 INIT_LIST_HEAD(&dev_resv_regions); 459 iommu_get_resv_regions(device->dev, &dev_resv_regions); 460 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 461 iommu_put_resv_regions(device->dev, &dev_resv_regions); 462 if (ret) 463 break; 464 } 465 mutex_unlock(&group->mutex); 466 return ret; 467 } 468 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 469 470 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 471 char *buf) 472 { 473 struct iommu_resv_region *region, *next; 474 struct list_head group_resv_regions; 475 char *str = buf; 476 477 INIT_LIST_HEAD(&group_resv_regions); 478 iommu_get_group_resv_regions(group, &group_resv_regions); 479 480 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 481 str += sprintf(str, "0x%016llx 0x%016llx %s\n", 482 (long long int)region->start, 483 (long long int)(region->start + 484 region->length - 1), 485 iommu_group_resv_type_string[region->type]); 486 kfree(region); 487 } 488 489 return (str - buf); 490 } 491 492 static ssize_t iommu_group_show_type(struct iommu_group *group, 493 char *buf) 494 { 495 char *type = "unknown\n"; 496 497 mutex_lock(&group->mutex); 498 if (group->default_domain) { 499 switch (group->default_domain->type) { 500 case IOMMU_DOMAIN_BLOCKED: 501 type = "blocked\n"; 502 break; 503 case IOMMU_DOMAIN_IDENTITY: 504 type = "identity\n"; 505 
break; 506 case IOMMU_DOMAIN_UNMANAGED: 507 type = "unmanaged\n"; 508 break; 509 case IOMMU_DOMAIN_DMA: 510 type = "DMA\n"; 511 break; 512 } 513 } 514 mutex_unlock(&group->mutex); 515 strcpy(buf, type); 516 517 return strlen(type); 518 } 519 520 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 521 522 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 523 iommu_group_show_resv_regions, NULL); 524 525 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 526 iommu_group_store_type); 527 528 static void iommu_group_release(struct kobject *kobj) 529 { 530 struct iommu_group *group = to_iommu_group(kobj); 531 532 pr_debug("Releasing group %d\n", group->id); 533 534 if (group->iommu_data_release) 535 group->iommu_data_release(group->iommu_data); 536 537 ida_simple_remove(&iommu_group_ida, group->id); 538 539 if (group->default_domain) 540 iommu_domain_free(group->default_domain); 541 542 kfree(group->name); 543 kfree(group); 544 } 545 546 static struct kobj_type iommu_group_ktype = { 547 .sysfs_ops = &iommu_group_sysfs_ops, 548 .release = iommu_group_release, 549 }; 550 551 /** 552 * iommu_group_alloc - Allocate a new group 553 * 554 * This function is called by an iommu driver to allocate a new iommu 555 * group. The iommu group represents the minimum granularity of the iommu. 556 * Upon successful return, the caller holds a reference to the supplied 557 * group in order to hold the group until devices are added. Use 558 * iommu_group_put() to release this extra reference count, allowing the 559 * group to be automatically reclaimed once it has no devices or external 560 * references. 561 */ 562 struct iommu_group *iommu_group_alloc(void) 563 { 564 struct iommu_group *group; 565 int ret; 566 567 group = kzalloc(sizeof(*group), GFP_KERNEL); 568 if (!group) 569 return ERR_PTR(-ENOMEM); 570 571 group->kobj.kset = iommu_group_kset; 572 mutex_init(&group->mutex); 573 INIT_LIST_HEAD(&group->devices); 574 INIT_LIST_HEAD(&group->entry); 575 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); 576 577 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); 578 if (ret < 0) { 579 kfree(group); 580 return ERR_PTR(ret); 581 } 582 group->id = ret; 583 584 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 585 NULL, "%d", group->id); 586 if (ret) { 587 ida_simple_remove(&iommu_group_ida, group->id); 588 kobject_put(&group->kobj); 589 return ERR_PTR(ret); 590 } 591 592 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 593 if (!group->devices_kobj) { 594 kobject_put(&group->kobj); /* triggers .release & free */ 595 return ERR_PTR(-ENOMEM); 596 } 597 598 /* 599 * The devices_kobj holds a reference on the group kobject, so 600 * as long as that exists so will the group. We can therefore 601 * use the devices_kobj for reference counting. 
602 */ 603 kobject_put(&group->kobj); 604 605 ret = iommu_group_create_file(group, 606 &iommu_group_attr_reserved_regions); 607 if (ret) 608 return ERR_PTR(ret); 609 610 ret = iommu_group_create_file(group, &iommu_group_attr_type); 611 if (ret) 612 return ERR_PTR(ret); 613 614 pr_debug("Allocated group %d\n", group->id); 615 616 return group; 617 } 618 EXPORT_SYMBOL_GPL(iommu_group_alloc); 619 620 struct iommu_group *iommu_group_get_by_id(int id) 621 { 622 struct kobject *group_kobj; 623 struct iommu_group *group; 624 const char *name; 625 626 if (!iommu_group_kset) 627 return NULL; 628 629 name = kasprintf(GFP_KERNEL, "%d", id); 630 if (!name) 631 return NULL; 632 633 group_kobj = kset_find_obj(iommu_group_kset, name); 634 kfree(name); 635 636 if (!group_kobj) 637 return NULL; 638 639 group = container_of(group_kobj, struct iommu_group, kobj); 640 BUG_ON(group->id != id); 641 642 kobject_get(group->devices_kobj); 643 kobject_put(&group->kobj); 644 645 return group; 646 } 647 EXPORT_SYMBOL_GPL(iommu_group_get_by_id); 648 649 /** 650 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 651 * @group: the group 652 * 653 * iommu drivers can store data in the group for use when doing iommu 654 * operations. This function provides a way to retrieve it. Caller 655 * should hold a group reference. 656 */ 657 void *iommu_group_get_iommudata(struct iommu_group *group) 658 { 659 return group->iommu_data; 660 } 661 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 662 663 /** 664 * iommu_group_set_iommudata - set iommu_data for a group 665 * @group: the group 666 * @iommu_data: new data 667 * @release: release function for iommu_data 668 * 669 * iommu drivers can store data in the group for use when doing iommu 670 * operations. This function provides a way to set the data after 671 * the group has been allocated. Caller should hold a group reference. 672 */ 673 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 674 void (*release)(void *iommu_data)) 675 { 676 group->iommu_data = iommu_data; 677 group->iommu_data_release = release; 678 } 679 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 680 681 /** 682 * iommu_group_set_name - set name for a group 683 * @group: the group 684 * @name: name 685 * 686 * Allow iommu driver to set a name for a group. When set it will 687 * appear in a name attribute file under the group in sysfs. 
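 *
 * As an illustrative sketch only (the name string below is made up), a
 * driver that has just allocated a group might name it like this:
 *
 *	struct iommu_group *grp = iommu_group_alloc();
 *
 *	if (!IS_ERR(grp))
 *		iommu_group_set_name(grp, "my-isolation-unit");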
688 */ 689 int iommu_group_set_name(struct iommu_group *group, const char *name) 690 { 691 int ret; 692 693 if (group->name) { 694 iommu_group_remove_file(group, &iommu_group_attr_name); 695 kfree(group->name); 696 group->name = NULL; 697 if (!name) 698 return 0; 699 } 700 701 group->name = kstrdup(name, GFP_KERNEL); 702 if (!group->name) 703 return -ENOMEM; 704 705 ret = iommu_group_create_file(group, &iommu_group_attr_name); 706 if (ret) { 707 kfree(group->name); 708 group->name = NULL; 709 return ret; 710 } 711 712 return 0; 713 } 714 EXPORT_SYMBOL_GPL(iommu_group_set_name); 715 716 static int iommu_create_device_direct_mappings(struct iommu_group *group, 717 struct device *dev) 718 { 719 struct iommu_domain *domain = group->default_domain; 720 struct iommu_resv_region *entry; 721 struct list_head mappings; 722 unsigned long pg_size; 723 int ret = 0; 724 725 if (!domain || domain->type != IOMMU_DOMAIN_DMA) 726 return 0; 727 728 BUG_ON(!domain->pgsize_bitmap); 729 730 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 731 INIT_LIST_HEAD(&mappings); 732 733 iommu_get_resv_regions(dev, &mappings); 734 735 /* We need to consider overlapping regions for different devices */ 736 list_for_each_entry(entry, &mappings, list) { 737 dma_addr_t start, end, addr; 738 size_t map_size = 0; 739 740 if (domain->ops->apply_resv_region) 741 domain->ops->apply_resv_region(dev, domain, entry); 742 743 start = ALIGN(entry->start, pg_size); 744 end = ALIGN(entry->start + entry->length, pg_size); 745 746 if (entry->type != IOMMU_RESV_DIRECT && 747 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 748 continue; 749 750 for (addr = start; addr <= end; addr += pg_size) { 751 phys_addr_t phys_addr; 752 753 if (addr == end) 754 goto map_end; 755 756 phys_addr = iommu_iova_to_phys(domain, addr); 757 if (!phys_addr) { 758 map_size += pg_size; 759 continue; 760 } 761 762 map_end: 763 if (map_size) { 764 ret = iommu_map(domain, addr - map_size, 765 addr - map_size, map_size, 766 entry->prot); 767 if (ret) 768 goto out; 769 map_size = 0; 770 } 771 } 772 773 } 774 775 iommu_flush_iotlb_all(domain); 776 777 out: 778 iommu_put_resv_regions(dev, &mappings); 779 780 return ret; 781 } 782 783 static bool iommu_is_attach_deferred(struct iommu_domain *domain, 784 struct device *dev) 785 { 786 if (domain->ops->is_attach_deferred) 787 return domain->ops->is_attach_deferred(domain, dev); 788 789 return false; 790 } 791 792 /** 793 * iommu_group_add_device - add a device to an iommu group 794 * @group: the group into which to add the device (reference should be held) 795 * @dev: the device 796 * 797 * This function is called by an iommu driver to add a device into a 798 * group. Adding a device increments the group reference count. 799 */ 800 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 801 { 802 int ret, i = 0; 803 struct group_device *device; 804 805 device = kzalloc(sizeof(*device), GFP_KERNEL); 806 if (!device) 807 return -ENOMEM; 808 809 device->dev = dev; 810 811 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 812 if (ret) 813 goto err_free_device; 814 815 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 816 rename: 817 if (!device->name) { 818 ret = -ENOMEM; 819 goto err_remove_link; 820 } 821 822 ret = sysfs_create_link_nowarn(group->devices_kobj, 823 &dev->kobj, device->name); 824 if (ret) { 825 if (ret == -EEXIST && i >= 0) { 826 /* 827 * Account for the slim chance of collision 828 * and append an instance to the name. 
829 */ 830 kfree(device->name); 831 device->name = kasprintf(GFP_KERNEL, "%s.%d", 832 kobject_name(&dev->kobj), i++); 833 goto rename; 834 } 835 goto err_free_name; 836 } 837 838 kobject_get(group->devices_kobj); 839 840 dev->iommu_group = group; 841 842 mutex_lock(&group->mutex); 843 list_add_tail(&device->list, &group->devices); 844 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) 845 ret = __iommu_attach_device(group->domain, dev); 846 mutex_unlock(&group->mutex); 847 if (ret) 848 goto err_put_group; 849 850 /* Notify any listeners about change to group. */ 851 blocking_notifier_call_chain(&group->notifier, 852 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev); 853 854 trace_add_device_to_group(group->id, dev); 855 856 dev_info(dev, "Adding to iommu group %d\n", group->id); 857 858 return 0; 859 860 err_put_group: 861 mutex_lock(&group->mutex); 862 list_del(&device->list); 863 mutex_unlock(&group->mutex); 864 dev->iommu_group = NULL; 865 kobject_put(group->devices_kobj); 866 sysfs_remove_link(group->devices_kobj, device->name); 867 err_free_name: 868 kfree(device->name); 869 err_remove_link: 870 sysfs_remove_link(&dev->kobj, "iommu_group"); 871 err_free_device: 872 kfree(device); 873 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 874 return ret; 875 } 876 EXPORT_SYMBOL_GPL(iommu_group_add_device); 877 878 /** 879 * iommu_group_remove_device - remove a device from it's current group 880 * @dev: device to be removed 881 * 882 * This function is called by an iommu driver to remove the device from 883 * it's current group. This decrements the iommu group reference count. 884 */ 885 void iommu_group_remove_device(struct device *dev) 886 { 887 struct iommu_group *group = dev->iommu_group; 888 struct group_device *tmp_device, *device = NULL; 889 890 dev_info(dev, "Removing from iommu group %d\n", group->id); 891 892 /* Pre-notify listeners that a device is being removed. */ 893 blocking_notifier_call_chain(&group->notifier, 894 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev); 895 896 mutex_lock(&group->mutex); 897 list_for_each_entry(tmp_device, &group->devices, list) { 898 if (tmp_device->dev == dev) { 899 device = tmp_device; 900 list_del(&device->list); 901 break; 902 } 903 } 904 mutex_unlock(&group->mutex); 905 906 if (!device) 907 return; 908 909 sysfs_remove_link(group->devices_kobj, device->name); 910 sysfs_remove_link(&dev->kobj, "iommu_group"); 911 912 trace_remove_device_from_group(group->id, dev); 913 914 kfree(device->name); 915 kfree(device); 916 dev->iommu_group = NULL; 917 kobject_put(group->devices_kobj); 918 } 919 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 920 921 static int iommu_group_device_count(struct iommu_group *group) 922 { 923 struct group_device *entry; 924 int ret = 0; 925 926 list_for_each_entry(entry, &group->devices, list) 927 ret++; 928 929 return ret; 930 } 931 932 /** 933 * iommu_group_for_each_dev - iterate over each device in the group 934 * @group: the group 935 * @data: caller opaque data to be passed to callback function 936 * @fn: caller supplied callback function 937 * 938 * This function is called by group users to iterate over group devices. 939 * Callers should hold a reference count to the group during callback. 940 * The group->mutex is held across callbacks, which will block calls to 941 * iommu_group_add/remove_device. 
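 *
 * A minimal usage sketch (the callback name and the counting logic are
 * purely illustrative; a non-zero return from the callback stops the
 * iteration):
 *
 *	static int count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_one);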
942 */ 943 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, 944 int (*fn)(struct device *, void *)) 945 { 946 struct group_device *device; 947 int ret = 0; 948 949 list_for_each_entry(device, &group->devices, list) { 950 ret = fn(device->dev, data); 951 if (ret) 952 break; 953 } 954 return ret; 955 } 956 957 958 int iommu_group_for_each_dev(struct iommu_group *group, void *data, 959 int (*fn)(struct device *, void *)) 960 { 961 int ret; 962 963 mutex_lock(&group->mutex); 964 ret = __iommu_group_for_each_dev(group, data, fn); 965 mutex_unlock(&group->mutex); 966 967 return ret; 968 } 969 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); 970 971 /** 972 * iommu_group_get - Return the group for a device and increment reference 973 * @dev: get the group that this device belongs to 974 * 975 * This function is called by iommu drivers and users to get the group 976 * for the specified device. If found, the group is returned and the group 977 * reference in incremented, else NULL. 978 */ 979 struct iommu_group *iommu_group_get(struct device *dev) 980 { 981 struct iommu_group *group = dev->iommu_group; 982 983 if (group) 984 kobject_get(group->devices_kobj); 985 986 return group; 987 } 988 EXPORT_SYMBOL_GPL(iommu_group_get); 989 990 /** 991 * iommu_group_ref_get - Increment reference on a group 992 * @group: the group to use, must not be NULL 993 * 994 * This function is called by iommu drivers to take additional references on an 995 * existing group. Returns the given group for convenience. 996 */ 997 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) 998 { 999 kobject_get(group->devices_kobj); 1000 return group; 1001 } 1002 EXPORT_SYMBOL_GPL(iommu_group_ref_get); 1003 1004 /** 1005 * iommu_group_put - Decrement group reference 1006 * @group: the group to use 1007 * 1008 * This function is called by iommu drivers and users to release the 1009 * iommu group. Once the reference count is zero, the group is released. 1010 */ 1011 void iommu_group_put(struct iommu_group *group) 1012 { 1013 if (group) 1014 kobject_put(group->devices_kobj); 1015 } 1016 EXPORT_SYMBOL_GPL(iommu_group_put); 1017 1018 /** 1019 * iommu_group_register_notifier - Register a notifier for group changes 1020 * @group: the group to watch 1021 * @nb: notifier block to signal 1022 * 1023 * This function allows iommu group users to track changes in a group. 1024 * See include/linux/iommu.h for actions sent via this notifier. Caller 1025 * should hold a reference to the group throughout notifier registration. 1026 */ 1027 int iommu_group_register_notifier(struct iommu_group *group, 1028 struct notifier_block *nb) 1029 { 1030 return blocking_notifier_chain_register(&group->notifier, nb); 1031 } 1032 EXPORT_SYMBOL_GPL(iommu_group_register_notifier); 1033 1034 /** 1035 * iommu_group_unregister_notifier - Unregister a notifier 1036 * @group: the group to watch 1037 * @nb: notifier block to signal 1038 * 1039 * Unregister a previously registered group notifier block. 
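 *
 * This pairs with iommu_group_register_notifier(); a rough sketch (the
 * callback name below is hypothetical):
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_event_cb,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);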
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as arguments. The handler should return 0 on success.
 * If the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the
 * following response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded
 * IRQ handler. When this function fails and the fault is recoverable, it is
 * the caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
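 *
 * A rough sketch of the expected call site in an IOMMU driver's fault IRQ
 * thread (how the fault record gets filled in is illustrative only):
 *
 *	struct iommu_fault_event evt = {
 *		.fault.type = IOMMU_FAULT_DMA_UNRECOV,
 *	};
 *
 *	... fill in evt.fault.event from the hardware fault record ...
 *	ret = iommu_report_device_fault(dev, &evt);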
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
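 *
 * The ID is the same number that names the group directory under
 * /sys/kernel/iommu_groups/, so a caller might, for example, log it:
 *
 *	dev_info(dev, "device is in iommu group %d\n", iommu_group_id(group));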
1264 */ 1265 int iommu_group_id(struct iommu_group *group) 1266 { 1267 return group->id; 1268 } 1269 EXPORT_SYMBOL_GPL(iommu_group_id); 1270 1271 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1272 unsigned long *devfns); 1273 1274 /* 1275 * To consider a PCI device isolated, we require ACS to support Source 1276 * Validation, Request Redirection, Completer Redirection, and Upstream 1277 * Forwarding. This effectively means that devices cannot spoof their 1278 * requester ID, requests and completions cannot be redirected, and all 1279 * transactions are forwarded upstream, even as it passes through a 1280 * bridge where the target device is downstream. 1281 */ 1282 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1283 1284 /* 1285 * For multifunction devices which are not isolated from each other, find 1286 * all the other non-isolated functions and look for existing groups. For 1287 * each function, we also need to look for aliases to or from other devices 1288 * that may already have a group. 1289 */ 1290 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1291 unsigned long *devfns) 1292 { 1293 struct pci_dev *tmp = NULL; 1294 struct iommu_group *group; 1295 1296 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1297 return NULL; 1298 1299 for_each_pci_dev(tmp) { 1300 if (tmp == pdev || tmp->bus != pdev->bus || 1301 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1302 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1303 continue; 1304 1305 group = get_pci_alias_group(tmp, devfns); 1306 if (group) { 1307 pci_dev_put(tmp); 1308 return group; 1309 } 1310 } 1311 1312 return NULL; 1313 } 1314 1315 /* 1316 * Look for aliases to or from the given device for existing groups. DMA 1317 * aliases are only supported on the same bus, therefore the search 1318 * space is quite small (especially since we're really only looking at pcie 1319 * device, and therefore only expect multiple slots on the root complex or 1320 * downstream switch ports). It's conceivable though that a pair of 1321 * multifunction devices could have aliases between them that would cause a 1322 * loop. To prevent this, we use a bitmap to track where we've been. 1323 */ 1324 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1325 unsigned long *devfns) 1326 { 1327 struct pci_dev *tmp = NULL; 1328 struct iommu_group *group; 1329 1330 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1331 return NULL; 1332 1333 group = iommu_group_get(&pdev->dev); 1334 if (group) 1335 return group; 1336 1337 for_each_pci_dev(tmp) { 1338 if (tmp == pdev || tmp->bus != pdev->bus) 1339 continue; 1340 1341 /* We alias them or they alias us */ 1342 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1343 group = get_pci_alias_group(tmp, devfns); 1344 if (group) { 1345 pci_dev_put(tmp); 1346 return group; 1347 } 1348 1349 group = get_pci_function_alias_group(tmp, devfns); 1350 if (group) { 1351 pci_dev_put(tmp); 1352 return group; 1353 } 1354 } 1355 } 1356 1357 return NULL; 1358 } 1359 1360 struct group_for_pci_data { 1361 struct pci_dev *pdev; 1362 struct iommu_group *group; 1363 }; 1364 1365 /* 1366 * DMA alias iterator callback, return the last seen device. Stop and return 1367 * the IOMMU group if we find one along the way. 
1368 */ 1369 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1370 { 1371 struct group_for_pci_data *data = opaque; 1372 1373 data->pdev = pdev; 1374 data->group = iommu_group_get(&pdev->dev); 1375 1376 return data->group != NULL; 1377 } 1378 1379 /* 1380 * Generic device_group call-back function. It just allocates one 1381 * iommu-group per device. 1382 */ 1383 struct iommu_group *generic_device_group(struct device *dev) 1384 { 1385 return iommu_group_alloc(); 1386 } 1387 EXPORT_SYMBOL_GPL(generic_device_group); 1388 1389 /* 1390 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1391 * to find or create an IOMMU group for a device. 1392 */ 1393 struct iommu_group *pci_device_group(struct device *dev) 1394 { 1395 struct pci_dev *pdev = to_pci_dev(dev); 1396 struct group_for_pci_data data; 1397 struct pci_bus *bus; 1398 struct iommu_group *group = NULL; 1399 u64 devfns[4] = { 0 }; 1400 1401 if (WARN_ON(!dev_is_pci(dev))) 1402 return ERR_PTR(-EINVAL); 1403 1404 /* 1405 * Find the upstream DMA alias for the device. A device must not 1406 * be aliased due to topology in order to have its own IOMMU group. 1407 * If we find an alias along the way that already belongs to a 1408 * group, use it. 1409 */ 1410 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1411 return data.group; 1412 1413 pdev = data.pdev; 1414 1415 /* 1416 * Continue upstream from the point of minimum IOMMU granularity 1417 * due to aliases to the point where devices are protected from 1418 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1419 * group, use it. 1420 */ 1421 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { 1422 if (!bus->self) 1423 continue; 1424 1425 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 1426 break; 1427 1428 pdev = bus->self; 1429 1430 group = iommu_group_get(&pdev->dev); 1431 if (group) 1432 return group; 1433 } 1434 1435 /* 1436 * Look for existing groups on device aliases. If we alias another 1437 * device or another device aliases us, use the same group. 1438 */ 1439 group = get_pci_alias_group(pdev, (unsigned long *)devfns); 1440 if (group) 1441 return group; 1442 1443 /* 1444 * Look for existing groups on non-isolated functions on the same 1445 * slot and aliases of those funcions, if any. No need to clear 1446 * the search bitmap, the tested devfns are still valid. 
1447 */ 1448 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1449 if (group) 1450 return group; 1451 1452 /* No shared group found, allocate new */ 1453 return iommu_group_alloc(); 1454 } 1455 EXPORT_SYMBOL_GPL(pci_device_group); 1456 1457 /* Get the IOMMU group for device on fsl-mc bus */ 1458 struct iommu_group *fsl_mc_device_group(struct device *dev) 1459 { 1460 struct device *cont_dev = fsl_mc_cont_dev(dev); 1461 struct iommu_group *group; 1462 1463 group = iommu_group_get(cont_dev); 1464 if (!group) 1465 group = iommu_group_alloc(); 1466 return group; 1467 } 1468 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1469 1470 static int iommu_get_def_domain_type(struct device *dev) 1471 { 1472 const struct iommu_ops *ops = dev->bus->iommu_ops; 1473 1474 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 1475 return IOMMU_DOMAIN_DMA; 1476 1477 if (ops->def_domain_type) 1478 return ops->def_domain_type(dev); 1479 1480 return 0; 1481 } 1482 1483 static int iommu_group_alloc_default_domain(struct bus_type *bus, 1484 struct iommu_group *group, 1485 unsigned int type) 1486 { 1487 struct iommu_domain *dom; 1488 1489 dom = __iommu_domain_alloc(bus, type); 1490 if (!dom && type != IOMMU_DOMAIN_DMA) { 1491 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA); 1492 if (dom) 1493 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1494 type, group->name); 1495 } 1496 1497 if (!dom) 1498 return -ENOMEM; 1499 1500 group->default_domain = dom; 1501 if (!group->domain) 1502 group->domain = dom; 1503 1504 if (!iommu_dma_strict) { 1505 int attr = 1; 1506 iommu_domain_set_attr(dom, 1507 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, 1508 &attr); 1509 } 1510 1511 return 0; 1512 } 1513 1514 static int iommu_alloc_default_domain(struct iommu_group *group, 1515 struct device *dev) 1516 { 1517 unsigned int type; 1518 1519 if (group->default_domain) 1520 return 0; 1521 1522 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type; 1523 1524 return iommu_group_alloc_default_domain(dev->bus, group, type); 1525 } 1526 1527 /** 1528 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1529 * @dev: target device 1530 * 1531 * This function is intended to be called by IOMMU drivers and extended to 1532 * support common, bus-defined algorithms when determining or creating the 1533 * IOMMU group for a device. On success, the caller will hold a reference 1534 * to the returned IOMMU group, which will already include the provided 1535 * device. The reference should be released with iommu_group_put(). 
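 *
 * Sketch of the expected calling pattern (mirroring __iommu_probe_device()
 * above):
 *
 *	group = iommu_group_get_for_dev(dev);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	...
 *	iommu_group_put(group);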
1536 */ 1537 static struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1538 { 1539 const struct iommu_ops *ops = dev->bus->iommu_ops; 1540 struct iommu_group *group; 1541 int ret; 1542 1543 group = iommu_group_get(dev); 1544 if (group) 1545 return group; 1546 1547 if (!ops) 1548 return ERR_PTR(-EINVAL); 1549 1550 group = ops->device_group(dev); 1551 if (WARN_ON_ONCE(group == NULL)) 1552 return ERR_PTR(-EINVAL); 1553 1554 if (IS_ERR(group)) 1555 return group; 1556 1557 ret = iommu_group_add_device(group, dev); 1558 if (ret) 1559 goto out_put_group; 1560 1561 return group; 1562 1563 out_put_group: 1564 iommu_group_put(group); 1565 1566 return ERR_PTR(ret); 1567 } 1568 1569 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1570 { 1571 return group->default_domain; 1572 } 1573 1574 static int probe_iommu_group(struct device *dev, void *data) 1575 { 1576 struct list_head *group_list = data; 1577 struct iommu_group *group; 1578 int ret; 1579 1580 /* Device is probed already if in a group */ 1581 group = iommu_group_get(dev); 1582 if (group) { 1583 iommu_group_put(group); 1584 return 0; 1585 } 1586 1587 ret = __iommu_probe_device(dev, group_list); 1588 if (ret == -ENODEV) 1589 ret = 0; 1590 1591 return ret; 1592 } 1593 1594 static int remove_iommu_group(struct device *dev, void *data) 1595 { 1596 iommu_release_device(dev); 1597 1598 return 0; 1599 } 1600 1601 static int iommu_bus_notifier(struct notifier_block *nb, 1602 unsigned long action, void *data) 1603 { 1604 unsigned long group_action = 0; 1605 struct device *dev = data; 1606 struct iommu_group *group; 1607 1608 /* 1609 * ADD/DEL call into iommu driver ops if provided, which may 1610 * result in ADD/DEL notifiers to group->notifier 1611 */ 1612 if (action == BUS_NOTIFY_ADD_DEVICE) { 1613 int ret; 1614 1615 ret = iommu_probe_device(dev); 1616 return (ret) ? 
		NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	int ret = 0;

	if (!iommu_is_attach_deferred(domain, dev))
		ret = __iommu_attach_device(domain, dev);

	return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	if (domain->ops->probe_finalize)
		domain->ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group
*group, *next; 1750 LIST_HEAD(group_list); 1751 int ret; 1752 1753 /* 1754 * This code-path does not allocate the default domain when 1755 * creating the iommu group, so do it after the groups are 1756 * created. 1757 */ 1758 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1759 if (ret) 1760 return ret; 1761 1762 list_for_each_entry_safe(group, next, &group_list, entry) { 1763 /* Remove item from the list */ 1764 list_del_init(&group->entry); 1765 1766 mutex_lock(&group->mutex); 1767 1768 /* Try to allocate default domain */ 1769 probe_alloc_default_domain(bus, group); 1770 1771 if (!group->default_domain) { 1772 mutex_unlock(&group->mutex); 1773 continue; 1774 } 1775 1776 iommu_group_create_direct_mappings(group); 1777 1778 ret = __iommu_group_dma_attach(group); 1779 1780 mutex_unlock(&group->mutex); 1781 1782 if (ret) 1783 break; 1784 1785 __iommu_group_dma_finalize(group); 1786 } 1787 1788 return ret; 1789 } 1790 1791 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) 1792 { 1793 struct notifier_block *nb; 1794 int err; 1795 1796 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 1797 if (!nb) 1798 return -ENOMEM; 1799 1800 nb->notifier_call = iommu_bus_notifier; 1801 1802 err = bus_register_notifier(bus, nb); 1803 if (err) 1804 goto out_free; 1805 1806 err = bus_iommu_probe(bus); 1807 if (err) 1808 goto out_err; 1809 1810 1811 return 0; 1812 1813 out_err: 1814 /* Clean up */ 1815 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group); 1816 bus_unregister_notifier(bus, nb); 1817 1818 out_free: 1819 kfree(nb); 1820 1821 return err; 1822 } 1823 1824 /** 1825 * bus_set_iommu - set iommu-callbacks for the bus 1826 * @bus: bus. 1827 * @ops: the callbacks provided by the iommu-driver 1828 * 1829 * This function is called by an iommu driver to set the iommu methods 1830 * used for a particular bus. Drivers for devices on that bus can use 1831 * the iommu-api after these ops are registered. 1832 * This special function is needed because IOMMUs are usually devices on 1833 * the bus itself, so the iommu drivers are not initialized when the bus 1834 * is set up. With this function the iommu-driver can set the iommu-ops 1835 * afterwards. 1836 */ 1837 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) 1838 { 1839 int err; 1840 1841 if (ops == NULL) { 1842 bus->iommu_ops = NULL; 1843 return 0; 1844 } 1845 1846 if (bus->iommu_ops != NULL) 1847 return -EBUSY; 1848 1849 bus->iommu_ops = ops; 1850 1851 /* Do IOMMU specific setup for this bus-type */ 1852 err = iommu_bus_init(bus, ops); 1853 if (err) 1854 bus->iommu_ops = NULL; 1855 1856 return err; 1857 } 1858 EXPORT_SYMBOL_GPL(bus_set_iommu); 1859 1860 bool iommu_present(struct bus_type *bus) 1861 { 1862 return bus->iommu_ops != NULL; 1863 } 1864 EXPORT_SYMBOL_GPL(iommu_present); 1865 1866 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 1867 { 1868 if (!bus->iommu_ops || !bus->iommu_ops->capable) 1869 return false; 1870 1871 return bus->iommu_ops->capable(cap); 1872 } 1873 EXPORT_SYMBOL_GPL(iommu_capable); 1874 1875 /** 1876 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1877 * @domain: iommu domain 1878 * @handler: fault handler 1879 * @token: user data, will be passed back to the fault handler 1880 * 1881 * This function should be used by IOMMU users which want to be notified 1882 * whenever an IOMMU fault happens. 1883 * 1884 * The fault handler itself should return 0 on success, and an appropriate 1885 * error code otherwise. 
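 *
 * A minimal sketch (the handler name is hypothetical and the action taken
 * in it is illustrative only):
 *
 *	static int my_fault_handler(struct iommu_domain *dom, struct device *dev,
 *				    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "IOMMU fault at iova 0x%lx\n", iova);
 *		return 0;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);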
1886 */ 1887 void iommu_set_fault_handler(struct iommu_domain *domain, 1888 iommu_fault_handler_t handler, 1889 void *token) 1890 { 1891 BUG_ON(!domain); 1892 1893 domain->handler = handler; 1894 domain->handler_token = token; 1895 } 1896 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1897 1898 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 1899 unsigned type) 1900 { 1901 struct iommu_domain *domain; 1902 1903 if (bus == NULL || bus->iommu_ops == NULL) 1904 return NULL; 1905 1906 domain = bus->iommu_ops->domain_alloc(type); 1907 if (!domain) 1908 return NULL; 1909 1910 domain->ops = bus->iommu_ops; 1911 domain->type = type; 1912 /* Assume all sizes by default; the driver may override this later */ 1913 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 1914 1915 return domain; 1916 } 1917 1918 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 1919 { 1920 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 1921 } 1922 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 1923 1924 void iommu_domain_free(struct iommu_domain *domain) 1925 { 1926 domain->ops->domain_free(domain); 1927 } 1928 EXPORT_SYMBOL_GPL(iommu_domain_free); 1929 1930 static int __iommu_attach_device(struct iommu_domain *domain, 1931 struct device *dev) 1932 { 1933 int ret; 1934 1935 if (unlikely(domain->ops->attach_dev == NULL)) 1936 return -ENODEV; 1937 1938 ret = domain->ops->attach_dev(domain, dev); 1939 if (!ret) 1940 trace_attach_device_to_domain(dev); 1941 return ret; 1942 } 1943 1944 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 1945 { 1946 struct iommu_group *group; 1947 int ret; 1948 1949 group = iommu_group_get(dev); 1950 if (!group) 1951 return -ENODEV; 1952 1953 /* 1954 * Lock the group to make sure the device-count doesn't 1955 * change while we are attaching 1956 */ 1957 mutex_lock(&group->mutex); 1958 ret = -EINVAL; 1959 if (iommu_group_device_count(group) != 1) 1960 goto out_unlock; 1961 1962 ret = __iommu_attach_group(domain, group); 1963 1964 out_unlock: 1965 mutex_unlock(&group->mutex); 1966 iommu_group_put(group); 1967 1968 return ret; 1969 } 1970 EXPORT_SYMBOL_GPL(iommu_attach_device); 1971 1972 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 1973 { 1974 const struct iommu_ops *ops = domain->ops; 1975 1976 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) 1977 return __iommu_attach_device(domain, dev); 1978 1979 return 0; 1980 } 1981 1982 /* 1983 * Check flags and other user provided data for valid combinations. We also 1984 * make sure no reserved fields or unused flags are set. This is to ensure 1985 * not breaking userspace in the future when these fields or flags are used. 
1986 */ 1987 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) 1988 { 1989 u32 mask; 1990 int i; 1991 1992 if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) 1993 return -EINVAL; 1994 1995 mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; 1996 if (info->cache & ~mask) 1997 return -EINVAL; 1998 1999 if (info->granularity >= IOMMU_INV_GRANU_NR) 2000 return -EINVAL; 2001 2002 switch (info->granularity) { 2003 case IOMMU_INV_GRANU_ADDR: 2004 if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) 2005 return -EINVAL; 2006 2007 mask = IOMMU_INV_ADDR_FLAGS_PASID | 2008 IOMMU_INV_ADDR_FLAGS_ARCHID | 2009 IOMMU_INV_ADDR_FLAGS_LEAF; 2010 2011 if (info->granu.addr_info.flags & ~mask) 2012 return -EINVAL; 2013 break; 2014 case IOMMU_INV_GRANU_PASID: 2015 mask = IOMMU_INV_PASID_FLAGS_PASID | 2016 IOMMU_INV_PASID_FLAGS_ARCHID; 2017 if (info->granu.pasid_info.flags & ~mask) 2018 return -EINVAL; 2019 2020 break; 2021 case IOMMU_INV_GRANU_DOMAIN: 2022 if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) 2023 return -EINVAL; 2024 break; 2025 default: 2026 return -EINVAL; 2027 } 2028 2029 /* Check reserved padding fields */ 2030 for (i = 0; i < sizeof(info->padding); i++) { 2031 if (info->padding[i]) 2032 return -EINVAL; 2033 } 2034 2035 return 0; 2036 } 2037 2038 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, 2039 void __user *uinfo) 2040 { 2041 struct iommu_cache_invalidate_info inv_info = { 0 }; 2042 u32 minsz; 2043 int ret; 2044 2045 if (unlikely(!domain->ops->cache_invalidate)) 2046 return -ENODEV; 2047 2048 /* 2049 * No new spaces can be added before the variable sized union, the 2050 * minimum size is the offset to the union. 2051 */ 2052 minsz = offsetof(struct iommu_cache_invalidate_info, granu); 2053 2054 /* Copy minsz from user to get flags and argsz */ 2055 if (copy_from_user(&inv_info, uinfo, minsz)) 2056 return -EFAULT; 2057 2058 /* Fields before the variable size union are mandatory */ 2059 if (inv_info.argsz < minsz) 2060 return -EINVAL; 2061 2062 /* PASID and address granu require additional info beyond minsz */ 2063 if (inv_info.granularity == IOMMU_INV_GRANU_PASID && 2064 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) 2065 return -EINVAL; 2066 2067 if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && 2068 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) 2069 return -EINVAL; 2070 2071 /* 2072 * User might be using a newer UAPI header which has a larger data 2073 * size, we shall support the existing flags within the current 2074 * size. Copy the remaining user data _after_ minsz but not more 2075 * than the current kernel supported size. 
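 *
 * E.g. if minsz were 16 and the kernel struct 32 bytes, a user argsz of 24
 * would copy 8 more bytes here, while an argsz of 48 would still copy only
 * 16 (numbers are illustrative, not the real structure sizes).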
2076 */ 2077 if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, 2078 min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) 2079 return -EFAULT; 2080 2081 /* Now the argsz is validated, check the content */ 2082 ret = iommu_check_cache_invl_data(&inv_info); 2083 if (ret) 2084 return ret; 2085 2086 return domain->ops->cache_invalidate(domain, dev, &inv_info); 2087 } 2088 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); 2089 2090 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) 2091 { 2092 u64 mask; 2093 int i; 2094 2095 if (data->version != IOMMU_GPASID_BIND_VERSION_1) 2096 return -EINVAL; 2097 2098 /* Check the range of supported formats */ 2099 if (data->format >= IOMMU_PASID_FORMAT_LAST) 2100 return -EINVAL; 2101 2102 /* Check all flags */ 2103 mask = IOMMU_SVA_GPASID_VAL; 2104 if (data->flags & ~mask) 2105 return -EINVAL; 2106 2107 /* Check reserved padding fields */ 2108 for (i = 0; i < sizeof(data->padding); i++) { 2109 if (data->padding[i]) 2110 return -EINVAL; 2111 } 2112 2113 return 0; 2114 } 2115 2116 static int iommu_sva_prepare_bind_data(void __user *udata, 2117 struct iommu_gpasid_bind_data *data) 2118 { 2119 u32 minsz; 2120 2121 /* 2122 * No new spaces can be added before the variable sized union, the 2123 * minimum size is the offset to the union. 2124 */ 2125 minsz = offsetof(struct iommu_gpasid_bind_data, vendor); 2126 2127 /* Copy minsz from user to get flags and argsz */ 2128 if (copy_from_user(data, udata, minsz)) 2129 return -EFAULT; 2130 2131 /* Fields before the variable size union are mandatory */ 2132 if (data->argsz < minsz) 2133 return -EINVAL; 2134 /* 2135 * User might be using a newer UAPI header, we shall let IOMMU vendor 2136 * driver decide on what size it needs. Since the guest PASID bind data 2137 * can be vendor specific, larger argsz could be the result of extension 2138 * for one vendor but it should not affect another vendor. 
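 *
 * Illustrative only (IOMMU_PASID_FORMAT_INTEL_VTD is assumed from the UAPI
 * header rather than from this file; the other fields are the ones
 * referenced by iommu_check_bind_data()), the fixed part of the structure
 * that userspace is expected to have filled looks roughly like:
 *
 *	struct iommu_gpasid_bind_data data = {
 *		.argsz		= sizeof(data),
 *		.version	= IOMMU_GPASID_BIND_VERSION_1,
 *		.format		= IOMMU_PASID_FORMAT_INTEL_VTD,
 *		.flags		= 0,
 *		.hpasid		= 1,
 *	};
 *
 * where .flags may only carry IOMMU_SVA_GPASID_VAL and .hpasid is the host
 * PASID the bind/unbind operates on.
 *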
* Copy the remaining user data _after_ minsz 2140 */ 2141 if (copy_from_user((void *)data + minsz, udata + minsz, 2142 min_t(u32, data->argsz, sizeof(*data)) - minsz)) 2143 return -EFAULT; 2144 2145 return iommu_check_bind_data(data); 2146 } 2147 2148 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, 2149 void __user *udata) 2150 { 2151 struct iommu_gpasid_bind_data data = { 0 }; 2152 int ret; 2153 2154 if (unlikely(!domain->ops->sva_bind_gpasid)) 2155 return -ENODEV; 2156 2157 ret = iommu_sva_prepare_bind_data(udata, &data); 2158 if (ret) 2159 return ret; 2160 2161 return domain->ops->sva_bind_gpasid(domain, dev, &data); 2162 } 2163 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); 2164 2165 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 2166 ioasid_t pasid) 2167 { 2168 if (unlikely(!domain->ops->sva_unbind_gpasid)) 2169 return -ENODEV; 2170 2171 return domain->ops->sva_unbind_gpasid(dev, pasid); 2172 } 2173 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); 2174 2175 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 2176 void __user *udata) 2177 { 2178 struct iommu_gpasid_bind_data data = { 0 }; 2179 int ret; 2180 2181 if (unlikely(!domain->ops->sva_bind_gpasid)) 2182 return -ENODEV; 2183 2184 ret = iommu_sva_prepare_bind_data(udata, &data); 2185 if (ret) 2186 return ret; 2187 2188 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); 2189 } 2190 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); 2191 2192 static void __iommu_detach_device(struct iommu_domain *domain, 2193 struct device *dev) 2194 { 2195 if (iommu_is_attach_deferred(domain, dev)) 2196 return; 2197 2198 if (unlikely(domain->ops->detach_dev == NULL)) 2199 return; 2200 2201 domain->ops->detach_dev(domain, dev); 2202 trace_detach_device_from_domain(dev); 2203 } 2204 2205 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 2206 { 2207 struct iommu_group *group; 2208 2209 group = iommu_group_get(dev); 2210 if (!group) 2211 return; 2212 2213 mutex_lock(&group->mutex); 2214 if (iommu_group_device_count(group) != 1) { 2215 WARN_ON(1); 2216 goto out_unlock; 2217 } 2218 2219 __iommu_detach_group(domain, group); 2220 2221 out_unlock: 2222 mutex_unlock(&group->mutex); 2223 iommu_group_put(group); 2224 } 2225 EXPORT_SYMBOL_GPL(iommu_detach_device); 2226 2227 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 2228 { 2229 struct iommu_domain *domain; 2230 struct iommu_group *group; 2231 2232 group = iommu_group_get(dev); 2233 if (!group) 2234 return NULL; 2235 2236 domain = group->domain; 2237 2238 iommu_group_put(group); 2239 2240 return domain; 2241 } 2242 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 2243 2244 /* 2245 * Fast path for IOMMU_DOMAIN_DMA implementations, which already provide their 2246 * own guarantees that the group and its default domain are valid and correct. 2247 */ 2248 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 2249 { 2250 return dev->iommu_group->default_domain; 2251 } 2252 2253 /* 2254 * IOMMU groups are really the natural working unit of the IOMMU, but 2255 * the IOMMU API works on domains and devices. Bridge that gap by 2256 * iterating over the devices in a group. Ideally we'd have a single 2257 * device which represents the requestor ID of the group, but we also 2258 * allow IOMMU drivers to create policy defined minimum sets, where 2259 * the physical hardware may be able to distinguish members, but we 2260 * wish to group them at a higher level (ex.
untrusted multi-function 2261 * PCI devices). Thus we attach each device. 2262 */ 2263 static int iommu_group_do_attach_device(struct device *dev, void *data) 2264 { 2265 struct iommu_domain *domain = data; 2266 2267 return __iommu_attach_device(domain, dev); 2268 } 2269 2270 static int __iommu_attach_group(struct iommu_domain *domain, 2271 struct iommu_group *group) 2272 { 2273 int ret; 2274 2275 if (group->default_domain && group->domain != group->default_domain) 2276 return -EBUSY; 2277 2278 ret = __iommu_group_for_each_dev(group, domain, 2279 iommu_group_do_attach_device); 2280 if (ret == 0) 2281 group->domain = domain; 2282 2283 return ret; 2284 } 2285 2286 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2287 { 2288 int ret; 2289 2290 mutex_lock(&group->mutex); 2291 ret = __iommu_attach_group(domain, group); 2292 mutex_unlock(&group->mutex); 2293 2294 return ret; 2295 } 2296 EXPORT_SYMBOL_GPL(iommu_attach_group); 2297 2298 static int iommu_group_do_detach_device(struct device *dev, void *data) 2299 { 2300 struct iommu_domain *domain = data; 2301 2302 __iommu_detach_device(domain, dev); 2303 2304 return 0; 2305 } 2306 2307 static void __iommu_detach_group(struct iommu_domain *domain, 2308 struct iommu_group *group) 2309 { 2310 int ret; 2311 2312 if (!group->default_domain) { 2313 __iommu_group_for_each_dev(group, domain, 2314 iommu_group_do_detach_device); 2315 group->domain = NULL; 2316 return; 2317 } 2318 2319 if (group->domain == group->default_domain) 2320 return; 2321 2322 /* Detach by re-attaching to the default domain */ 2323 ret = __iommu_group_for_each_dev(group, group->default_domain, 2324 iommu_group_do_attach_device); 2325 if (ret != 0) 2326 WARN_ON(1); 2327 else 2328 group->domain = group->default_domain; 2329 } 2330 2331 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2332 { 2333 mutex_lock(&group->mutex); 2334 __iommu_detach_group(domain, group); 2335 mutex_unlock(&group->mutex); 2336 } 2337 EXPORT_SYMBOL_GPL(iommu_detach_group); 2338 2339 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2340 { 2341 if (unlikely(domain->ops->iova_to_phys == NULL)) 2342 return 0; 2343 2344 return domain->ops->iova_to_phys(domain, iova); 2345 } 2346 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2347 2348 static size_t iommu_pgsize(struct iommu_domain *domain, 2349 unsigned long addr_merge, size_t size) 2350 { 2351 unsigned int pgsize_idx; 2352 size_t pgsize; 2353 2354 /* Max page size that still fits into 'size' */ 2355 pgsize_idx = __fls(size); 2356 2357 /* need to consider alignment requirements ? 
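 *
 * A made-up example of how alignment caps the choice: with
 * addr_merge = 0x203000 and size = SZ_2M, __fls(size) alone would allow a
 * 2MB page, but __ffs(addr_merge) limits the choice to 4KB alignment, so
 * with a hardware bitmap of SZ_4K | SZ_2M | SZ_1G this function ends up
 * returning SZ_4K.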
*/ 2358 if (likely(addr_merge)) { 2359 /* Max page size allowed by address */ 2360 unsigned int align_pgsize_idx = __ffs(addr_merge); 2361 pgsize_idx = min(pgsize_idx, align_pgsize_idx); 2362 } 2363 2364 /* build a mask of acceptable page sizes */ 2365 pgsize = (1UL << (pgsize_idx + 1)) - 1; 2366 2367 /* throw away page sizes not supported by the hardware */ 2368 pgsize &= domain->pgsize_bitmap; 2369 2370 /* make sure we're still sane */ 2371 BUG_ON(!pgsize); 2372 2373 /* pick the biggest page */ 2374 pgsize_idx = __fls(pgsize); 2375 pgsize = 1UL << pgsize_idx; 2376 2377 return pgsize; 2378 } 2379 2380 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2381 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2382 { 2383 const struct iommu_ops *ops = domain->ops; 2384 unsigned long orig_iova = iova; 2385 unsigned int min_pagesz; 2386 size_t orig_size = size; 2387 phys_addr_t orig_paddr = paddr; 2388 int ret = 0; 2389 2390 if (unlikely(ops->map == NULL || 2391 domain->pgsize_bitmap == 0UL)) 2392 return -ENODEV; 2393 2394 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2395 return -EINVAL; 2396 2397 /* find out the minimum page size supported */ 2398 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2399 2400 /* 2401 * both the virtual address and the physical one, as well as 2402 * the size of the mapping, must be aligned (at least) to the 2403 * size of the smallest page supported by the hardware 2404 */ 2405 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2406 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2407 iova, &paddr, size, min_pagesz); 2408 return -EINVAL; 2409 } 2410 2411 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2412 2413 while (size) { 2414 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); 2415 2416 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", 2417 iova, &paddr, pgsize); 2418 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2419 2420 if (ret) 2421 break; 2422 2423 iova += pgsize; 2424 paddr += pgsize; 2425 size -= pgsize; 2426 } 2427 2428 /* unroll mapping in case something went wrong */ 2429 if (ret) 2430 iommu_unmap(domain, orig_iova, orig_size - size); 2431 else 2432 trace_map(orig_iova, orig_paddr, orig_size); 2433 2434 return ret; 2435 } 2436 2437 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, 2438 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2439 { 2440 const struct iommu_ops *ops = domain->ops; 2441 int ret; 2442 2443 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2444 if (ret == 0 && ops->iotlb_sync_map) 2445 ops->iotlb_sync_map(domain, iova, size); 2446 2447 return ret; 2448 } 2449 2450 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2451 phys_addr_t paddr, size_t size, int prot) 2452 { 2453 might_sleep(); 2454 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2455 } 2456 EXPORT_SYMBOL_GPL(iommu_map); 2457 2458 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2459 phys_addr_t paddr, size_t size, int prot) 2460 { 2461 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2462 } 2463 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2464 2465 static size_t __iommu_unmap(struct iommu_domain *domain, 2466 unsigned long iova, size_t size, 2467 struct iommu_iotlb_gather *iotlb_gather) 2468 { 2469 const struct iommu_ops *ops = domain->ops; 2470 size_t unmapped_page, unmapped = 0; 2471 unsigned long orig_iova = iova; 2472 unsigned int min_pagesz; 2473 2474 if (unlikely(ops->unmap == NULL 
|| 2475 domain->pgsize_bitmap == 0UL)) 2476 return 0; 2477 2478 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2479 return 0; 2480 2481 /* find out the minimum page size supported */ 2482 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2483 2484 /* 2485 * The virtual address, as well as the size of the mapping, must be 2486 * aligned (at least) to the size of the smallest page supported 2487 * by the hardware 2488 */ 2489 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2490 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2491 iova, size, min_pagesz); 2492 return 0; 2493 } 2494 2495 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2496 2497 /* 2498 * Keep iterating until we either unmap 'size' bytes (or more) 2499 * or we hit an area that isn't mapped. 2500 */ 2501 while (unmapped < size) { 2502 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); 2503 2504 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); 2505 if (!unmapped_page) 2506 break; 2507 2508 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2509 iova, unmapped_page); 2510 2511 iova += unmapped_page; 2512 unmapped += unmapped_page; 2513 } 2514 2515 trace_unmap(orig_iova, size, unmapped); 2516 return unmapped; 2517 } 2518 2519 size_t iommu_unmap(struct iommu_domain *domain, 2520 unsigned long iova, size_t size) 2521 { 2522 struct iommu_iotlb_gather iotlb_gather; 2523 size_t ret; 2524 2525 iommu_iotlb_gather_init(&iotlb_gather); 2526 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2527 iommu_iotlb_sync(domain, &iotlb_gather); 2528 2529 return ret; 2530 } 2531 EXPORT_SYMBOL_GPL(iommu_unmap); 2532 2533 size_t iommu_unmap_fast(struct iommu_domain *domain, 2534 unsigned long iova, size_t size, 2535 struct iommu_iotlb_gather *iotlb_gather) 2536 { 2537 return __iommu_unmap(domain, iova, size, iotlb_gather); 2538 } 2539 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2540 2541 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2542 struct scatterlist *sg, unsigned int nents, int prot, 2543 gfp_t gfp) 2544 { 2545 const struct iommu_ops *ops = domain->ops; 2546 size_t len = 0, mapped = 0; 2547 phys_addr_t start; 2548 unsigned int i = 0; 2549 int ret; 2550 2551 while (i <= nents) { 2552 phys_addr_t s_phys = sg_phys(sg); 2553 2554 if (len && s_phys != start + len) { 2555 ret = __iommu_map(domain, iova + mapped, start, 2556 len, prot, gfp); 2557 2558 if (ret) 2559 goto out_err; 2560 2561 mapped += len; 2562 len = 0; 2563 } 2564 2565 if (len) { 2566 len += sg->length; 2567 } else { 2568 len = sg->length; 2569 start = s_phys; 2570 } 2571 2572 if (++i < nents) 2573 sg = sg_next(sg); 2574 } 2575 2576 if (ops->iotlb_sync_map) 2577 ops->iotlb_sync_map(domain, iova, mapped); 2578 return mapped; 2579 2580 out_err: 2581 /* undo mappings already done */ 2582 iommu_unmap(domain, iova, mapped); 2583 2584 return 0; 2585 2586 } 2587 2588 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2589 struct scatterlist *sg, unsigned int nents, int prot) 2590 { 2591 might_sleep(); 2592 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); 2593 } 2594 EXPORT_SYMBOL_GPL(iommu_map_sg); 2595 2596 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2597 struct scatterlist *sg, unsigned int nents, int prot) 2598 { 2599 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); 2600 } 2601 2602 /** 2603 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2604 * @domain: the iommu domain where the fault has happened 2605 
* @dev: the device where the fault has happened 2606 * @iova: the faulting address 2607 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2608 * 2609 * This function should be called by the low-level IOMMU implementations 2610 * whenever IOMMU faults happen, to allow high-level users, that are 2611 * interested in such events, to know about them. 2612 * 2613 * This event may be useful for several possible use cases: 2614 * - mere logging of the event 2615 * - dynamic TLB/PTE loading 2616 * - if restarting of the faulting device is required 2617 * 2618 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2619 * PTE/TLB loading will one day be supported, implementations will be able 2620 * to tell whether it succeeded or not according to this return value). 2621 * 2622 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2623 * (though fault handlers can also return -ENOSYS, in case they want to 2624 * elicit the default behavior of the IOMMU drivers). 2625 */ 2626 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2627 unsigned long iova, int flags) 2628 { 2629 int ret = -ENOSYS; 2630 2631 /* 2632 * if upper layers showed interest and installed a fault handler, 2633 * invoke it. 2634 */ 2635 if (domain->handler) 2636 ret = domain->handler(domain, dev, iova, flags, 2637 domain->handler_token); 2638 2639 trace_io_page_fault(dev, iova, flags); 2640 return ret; 2641 } 2642 EXPORT_SYMBOL_GPL(report_iommu_fault); 2643 2644 static int __init iommu_init(void) 2645 { 2646 iommu_group_kset = kset_create_and_add("iommu_groups", 2647 NULL, kernel_kobj); 2648 BUG_ON(!iommu_group_kset); 2649 2650 iommu_debugfs_setup(); 2651 2652 return 0; 2653 } 2654 core_initcall(iommu_init); 2655 2656 int iommu_domain_get_attr(struct iommu_domain *domain, 2657 enum iommu_attr attr, void *data) 2658 { 2659 if (!domain->ops->domain_get_attr) 2660 return -EINVAL; 2661 return domain->ops->domain_get_attr(domain, attr, data); 2662 } 2663 EXPORT_SYMBOL_GPL(iommu_domain_get_attr); 2664 2665 int iommu_domain_set_attr(struct iommu_domain *domain, 2666 enum iommu_attr attr, void *data) 2667 { 2668 int ret = 0; 2669 2670 switch (attr) { 2671 default: 2672 if (domain->ops->domain_set_attr == NULL) 2673 return -EINVAL; 2674 2675 ret = domain->ops->domain_set_attr(domain, attr, data); 2676 } 2677 2678 return ret; 2679 } 2680 EXPORT_SYMBOL_GPL(iommu_domain_set_attr); 2681 2682 int iommu_enable_nesting(struct iommu_domain *domain) 2683 { 2684 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2685 return -EINVAL; 2686 if (!domain->ops->enable_nesting) 2687 return -EINVAL; 2688 return domain->ops->enable_nesting(domain); 2689 } 2690 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2691 2692 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2693 { 2694 const struct iommu_ops *ops = dev->bus->iommu_ops; 2695 2696 if (ops && ops->get_resv_regions) 2697 ops->get_resv_regions(dev, list); 2698 } 2699 2700 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2701 { 2702 const struct iommu_ops *ops = dev->bus->iommu_ops; 2703 2704 if (ops && ops->put_resv_regions) 2705 ops->put_resv_regions(dev, list); 2706 } 2707 2708 /** 2709 * generic_iommu_put_resv_regions - Reserved region driver helper 2710 * @dev: device for which to free reserved regions 2711 * @list: reserved region list for device 2712 * 2713 * IOMMU drivers can use this to implement their .put_resv_regions() callback 2714 * for simple reservations. 
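 *
 * For instance (an illustrative sketch, mydrv_get_resv_regions() is
 * hypothetical), a driver whose ->get_resv_regions() only adds regions
 * obtained from iommu_alloc_resv_region() can wire up:
 *
 *	.get_resv_regions	= mydrv_get_resv_regions,
 *	.put_resv_regions	= generic_iommu_put_resv_regions,
 *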
Memory allocated for each reserved region will be 2715 * freed. If an IOMMU driver allocates additional resources per region, it is 2716 * going to have to implement a custom callback. 2717 */ 2718 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) 2719 { 2720 struct iommu_resv_region *entry, *next; 2721 2722 list_for_each_entry_safe(entry, next, list, list) 2723 kfree(entry); 2724 } 2725 EXPORT_SYMBOL(generic_iommu_put_resv_regions); 2726 2727 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2728 size_t length, int prot, 2729 enum iommu_resv_type type) 2730 { 2731 struct iommu_resv_region *region; 2732 2733 region = kzalloc(sizeof(*region), GFP_KERNEL); 2734 if (!region) 2735 return NULL; 2736 2737 INIT_LIST_HEAD(®ion->list); 2738 region->start = start; 2739 region->length = length; 2740 region->prot = prot; 2741 region->type = type; 2742 return region; 2743 } 2744 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2745 2746 void iommu_set_default_passthrough(bool cmd_line) 2747 { 2748 if (cmd_line) 2749 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2750 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2751 } 2752 2753 void iommu_set_default_translated(bool cmd_line) 2754 { 2755 if (cmd_line) 2756 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2757 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2758 } 2759 2760 bool iommu_default_passthrough(void) 2761 { 2762 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2763 } 2764 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2765 2766 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2767 { 2768 const struct iommu_ops *ops = NULL; 2769 struct iommu_device *iommu; 2770 2771 spin_lock(&iommu_device_lock); 2772 list_for_each_entry(iommu, &iommu_device_list, list) 2773 if (iommu->fwnode == fwnode) { 2774 ops = iommu->ops; 2775 break; 2776 } 2777 spin_unlock(&iommu_device_lock); 2778 return ops; 2779 } 2780 2781 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2782 const struct iommu_ops *ops) 2783 { 2784 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2785 2786 if (fwspec) 2787 return ops == fwspec->ops ? 
0 : -EINVAL; 2788 2789 if (!dev_iommu_get(dev)) 2790 return -ENOMEM; 2791 2792 /* Preallocate for the overwhelmingly common case of 1 ID */ 2793 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2794 if (!fwspec) 2795 return -ENOMEM; 2796 2797 of_node_get(to_of_node(iommu_fwnode)); 2798 fwspec->iommu_fwnode = iommu_fwnode; 2799 fwspec->ops = ops; 2800 dev_iommu_fwspec_set(dev, fwspec); 2801 return 0; 2802 } 2803 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2804 2805 void iommu_fwspec_free(struct device *dev) 2806 { 2807 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2808 2809 if (fwspec) { 2810 fwnode_handle_put(fwspec->iommu_fwnode); 2811 kfree(fwspec); 2812 dev_iommu_fwspec_set(dev, NULL); 2813 } 2814 } 2815 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2816 2817 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2818 { 2819 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2820 int i, new_num; 2821 2822 if (!fwspec) 2823 return -EINVAL; 2824 2825 new_num = fwspec->num_ids + num_ids; 2826 if (new_num > 1) { 2827 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2828 GFP_KERNEL); 2829 if (!fwspec) 2830 return -ENOMEM; 2831 2832 dev_iommu_fwspec_set(dev, fwspec); 2833 } 2834 2835 for (i = 0; i < num_ids; i++) 2836 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2837 2838 fwspec->num_ids = new_num; 2839 return 0; 2840 } 2841 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2842 2843 /* 2844 * Per device IOMMU features. 2845 */ 2846 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2847 { 2848 if (dev->iommu && dev->iommu->iommu_dev) { 2849 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2850 2851 if (ops->dev_enable_feat) 2852 return ops->dev_enable_feat(dev, feat); 2853 } 2854 2855 return -ENODEV; 2856 } 2857 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2858 2859 /* 2860 * The device drivers should do the necessary cleanups before calling this. 2861 * For example, before disabling the aux-domain feature, the device driver 2862 * should detach all aux-domains. Otherwise, this will return -EBUSY. 2863 */ 2864 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2865 { 2866 if (dev->iommu && dev->iommu->iommu_dev) { 2867 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2868 2869 if (ops->dev_disable_feat) 2870 return ops->dev_disable_feat(dev, feat); 2871 } 2872 2873 return -EBUSY; 2874 } 2875 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2876 2877 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 2878 { 2879 if (dev->iommu && dev->iommu->iommu_dev) { 2880 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2881 2882 if (ops->dev_feat_enabled) 2883 return ops->dev_feat_enabled(dev, feat); 2884 } 2885 2886 return false; 2887 } 2888 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); 2889 2890 /* 2891 * Aux-domain specific attach/detach. 2892 * 2893 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns 2894 * true. Also, as long as domains are attached to a device through this 2895 * interface, any tries to call iommu_attach_device() should fail 2896 * (iommu_detach_device() can't fail, so we fail when trying to re-attach). 2897 * This should make us safe against a device being attached to a guest as a 2898 * whole while there are still pasid users on it (aux and sva). 
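 *
 * A rough usage sketch (illustrative only, error handling omitted):
 *
 *	int pasid;
 *	struct iommu_domain *domain;
 *
 *	iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
 *	domain = iommu_domain_alloc(dev->bus);
 *	iommu_aux_attach_device(domain, dev);
 *	pasid = iommu_aux_get_pasid(domain, dev);
 *	(submit work to the device tagged with pasid)
 *	iommu_aux_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);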
2899 */ 2900 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 2901 { 2902 int ret = -ENODEV; 2903 2904 if (domain->ops->aux_attach_dev) 2905 ret = domain->ops->aux_attach_dev(domain, dev); 2906 2907 if (!ret) 2908 trace_attach_device_to_domain(dev); 2909 2910 return ret; 2911 } 2912 EXPORT_SYMBOL_GPL(iommu_aux_attach_device); 2913 2914 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 2915 { 2916 if (domain->ops->aux_detach_dev) { 2917 domain->ops->aux_detach_dev(domain, dev); 2918 trace_detach_device_from_domain(dev); 2919 } 2920 } 2921 EXPORT_SYMBOL_GPL(iommu_aux_detach_device); 2922 2923 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 2924 { 2925 int ret = -ENODEV; 2926 2927 if (domain->ops->aux_get_pasid) 2928 ret = domain->ops->aux_get_pasid(domain, dev); 2929 2930 return ret; 2931 } 2932 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); 2933 2934 /** 2935 * iommu_sva_bind_device() - Bind a process address space to a device 2936 * @dev: the device 2937 * @mm: the mm to bind, caller must hold a reference to it 2938 * 2939 * Create a bond between device and address space, allowing the device to access 2940 * the mm using the returned PASID. If a bond already exists between @device and 2941 * @mm, it is returned and an additional reference is taken. Caller must call 2942 * iommu_sva_unbind_device() to release each reference. 2943 * 2944 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to 2945 * initialize the required SVA features. 2946 * 2947 * On error, returns an ERR_PTR value. 2948 */ 2949 struct iommu_sva * 2950 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 2951 { 2952 struct iommu_group *group; 2953 struct iommu_sva *handle = ERR_PTR(-EINVAL); 2954 const struct iommu_ops *ops = dev->bus->iommu_ops; 2955 2956 if (!ops || !ops->sva_bind) 2957 return ERR_PTR(-ENODEV); 2958 2959 group = iommu_group_get(dev); 2960 if (!group) 2961 return ERR_PTR(-ENODEV); 2962 2963 /* Ensure device count and domain don't change while we're binding */ 2964 mutex_lock(&group->mutex); 2965 2966 /* 2967 * To keep things simple, SVA currently doesn't support IOMMU groups 2968 * with more than one device. Existing SVA-capable systems are not 2969 * affected by the problems that required IOMMU groups (lack of ACS 2970 * isolation, device ID aliasing and other hardware issues). 2971 */ 2972 if (iommu_group_device_count(group) != 1) 2973 goto out_unlock; 2974 2975 handle = ops->sva_bind(dev, mm, drvdata); 2976 2977 out_unlock: 2978 mutex_unlock(&group->mutex); 2979 iommu_group_put(group); 2980 2981 return handle; 2982 } 2983 EXPORT_SYMBOL_GPL(iommu_sva_bind_device); 2984 2985 /** 2986 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device 2987 * @handle: the handle returned by iommu_sva_bind_device() 2988 * 2989 * Put reference to a bond between device and address space. The device should 2990 * not be issuing any more transaction for this PASID. All outstanding page 2991 * requests for this PASID must have been flushed to the IOMMU. 
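 *
 * Illustrative pairing with iommu_sva_bind_device() (a sketch only; error
 * handling, PASID validity checks and any driver-specific drvdata are
 * omitted):
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	pasid = iommu_sva_get_pasid(handle);
 *	(program pasid into the device and let it DMA to the process VAs)
 *	iommu_sva_unbind_device(handle);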
2992 */ 2993 void iommu_sva_unbind_device(struct iommu_sva *handle) 2994 { 2995 struct iommu_group *group; 2996 struct device *dev = handle->dev; 2997 const struct iommu_ops *ops = dev->bus->iommu_ops; 2998 2999 if (!ops || !ops->sva_unbind) 3000 return; 3001 3002 group = iommu_group_get(dev); 3003 if (!group) 3004 return; 3005 3006 mutex_lock(&group->mutex); 3007 ops->sva_unbind(handle); 3008 mutex_unlock(&group->mutex); 3009 3010 iommu_group_put(group); 3011 } 3012 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); 3013 3014 u32 iommu_sva_get_pasid(struct iommu_sva *handle) 3015 { 3016 const struct iommu_ops *ops = handle->dev->bus->iommu_ops; 3017 3018 if (!ops || !ops->sva_get_pasid) 3019 return IOMMU_PASID_INVALID; 3020 3021 return ops->sva_get_pasid(handle); 3022 } 3023 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); 3024 3025 /* 3026 * Changes the default domain of an iommu group that has *only* one device 3027 * 3028 * @group: The group for which the default domain should be changed 3029 * @prev_dev: The device in the group (this is used to make sure that the device 3030 * hasn't changed after the caller has called this function) 3031 * @type: The type of the new default domain that gets associated with the group 3032 * 3033 * Returns 0 on success and error code on failure 3034 * 3035 * Note: 3036 * 1. Presently, this function is called only when user requests to change the 3037 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 3038 * Please take a closer look if intended to use for other purposes. 3039 */ 3040 static int iommu_change_dev_def_domain(struct iommu_group *group, 3041 struct device *prev_dev, int type) 3042 { 3043 struct iommu_domain *prev_dom; 3044 struct group_device *grp_dev; 3045 int ret, dev_def_dom; 3046 struct device *dev; 3047 3048 if (!group) 3049 return -EINVAL; 3050 3051 mutex_lock(&group->mutex); 3052 3053 if (group->default_domain != group->domain) { 3054 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 3055 ret = -EBUSY; 3056 goto out; 3057 } 3058 3059 /* 3060 * iommu group wasn't locked while acquiring device lock in 3061 * iommu_group_store_type(). So, make sure that the device count hasn't 3062 * changed while acquiring device lock. 3063 * 3064 * Changing default domain of an iommu group with two or more devices 3065 * isn't supported because there could be a potential deadlock. Consider 3066 * the following scenario. T1 is trying to acquire device locks of all 3067 * the devices in the group and before it could acquire all of them, 3068 * there could be another thread T2 (from different sub-system and use 3069 * case) that has already acquired some of the device locks and might be 3070 * waiting for T1 to release other device locks. 
3071 */ 3072 if (iommu_group_device_count(group) != 1) { 3073 dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); 3074 ret = -EINVAL; 3075 goto out; 3076 } 3077 3078 /* Since group has only one device */ 3079 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3080 dev = grp_dev->dev; 3081 3082 if (prev_dev != dev) { 3083 dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); 3084 ret = -EBUSY; 3085 goto out; 3086 } 3087 3088 prev_dom = group->default_domain; 3089 if (!prev_dom) { 3090 ret = -EINVAL; 3091 goto out; 3092 } 3093 3094 dev_def_dom = iommu_get_def_domain_type(dev); 3095 if (!type) { 3096 /* 3097 * If the user hasn't requested any specific type of domain and 3098 * if the device supports more than one domain type, then default 3099 * to the domain type the device was booted with 3100 */ 3101 type = dev_def_dom ? : iommu_def_domain_type; 3102 } else if (dev_def_dom && type != dev_def_dom) { 3103 dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", 3104 iommu_domain_type_str(type)); 3105 ret = -EINVAL; 3106 goto out; 3107 } 3108 3109 /* 3110 * Switch to a new domain only if the requested domain type is different 3111 * from the existing default domain type 3112 */ 3113 if (prev_dom->type == type) { 3114 ret = 0; 3115 goto out; 3116 } 3117 3118 /* Sets group->default_domain to the newly allocated domain */ 3119 ret = iommu_group_alloc_default_domain(dev->bus, group, type); 3120 if (ret) 3121 goto out; 3122 3123 ret = iommu_create_device_direct_mappings(group, dev); 3124 if (ret) 3125 goto free_new_domain; 3126 3127 ret = __iommu_attach_device(group->default_domain, dev); 3128 if (ret) 3129 goto free_new_domain; 3130 3131 group->domain = group->default_domain; 3132 3133 /* 3134 * Release the mutex here because ops->probe_finalize() call-back of 3135 * some vendor IOMMU drivers calls arm_iommu_attach_device() which 3136 * in turn might call back into IOMMU core code, where it tries to take 3137 * group->mutex, resulting in a deadlock. 3138 */ 3139 mutex_unlock(&group->mutex); 3140 3141 /* Make sure dma_ops is appropriately set */ 3142 iommu_group_do_probe_finalize(dev, group->default_domain); 3143 iommu_domain_free(prev_dom); 3144 return 0; 3145 3146 free_new_domain: 3147 iommu_domain_free(group->default_domain); 3148 group->default_domain = prev_dom; 3149 group->domain = prev_dom; 3150 3151 out: 3152 mutex_unlock(&group->mutex); 3153 3154 return ret; 3155 } 3156 3157 /* 3158 * Changing the default domain through sysfs requires the user to unbind the 3159 * drivers from the devices in the iommu group. Return failure if this 3160 * requirement is not met. 3161 * 3162 * We need to consider the race between this and the device release path. 3163 * device_lock(dev) is used here to guarantee that the device release path 3164 * will not be entered at the same time.
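 *
 * For example (the group number is made up), after unbinding the device's
 * driver an administrator can request a switch with
 * "echo identity > /sys/kernel/iommu_groups/7/type"; the accepted values
 * are "identity", "DMA" and "auto", matching the parsing below.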
3165 */ 3166 static ssize_t iommu_group_store_type(struct iommu_group *group, 3167 const char *buf, size_t count) 3168 { 3169 struct group_device *grp_dev; 3170 struct device *dev; 3171 int ret, req_type; 3172 3173 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3174 return -EACCES; 3175 3176 if (WARN_ON(!group)) 3177 return -EINVAL; 3178 3179 if (sysfs_streq(buf, "identity")) 3180 req_type = IOMMU_DOMAIN_IDENTITY; 3181 else if (sysfs_streq(buf, "DMA")) 3182 req_type = IOMMU_DOMAIN_DMA; 3183 else if (sysfs_streq(buf, "auto")) 3184 req_type = 0; 3185 else 3186 return -EINVAL; 3187 3188 /* 3189 * Lock/Unlock the group mutex here before device lock to 3190 * 1. Make sure that the iommu group has only one device (this is a 3191 * prerequisite for step 2) 3192 * 2. Get struct *dev which is needed to lock device 3193 */ 3194 mutex_lock(&group->mutex); 3195 if (iommu_group_device_count(group) != 1) { 3196 mutex_unlock(&group->mutex); 3197 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 3198 return -EINVAL; 3199 } 3200 3201 /* Since group has only one device */ 3202 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3203 dev = grp_dev->dev; 3204 get_device(dev); 3205 3206 /* 3207 * Don't hold the group mutex because taking group mutex first and then 3208 * the device lock could potentially cause a deadlock as below. Assume 3209 * two threads T1 and T2. T1 is trying to change default domain of an 3210 * iommu group and T2 is trying to hot unplug a device or release [1] VF 3211 * of a PCIe device which is in the same iommu group. T1 takes group 3212 * mutex and before it could take device lock assume T2 has taken device 3213 * lock and is yet to take group mutex. Now, both the threads will be 3214 * waiting for the other thread to release lock. Below, lock order was 3215 * suggested. 3216 * device_lock(dev); 3217 * mutex_lock(&group->mutex); 3218 * iommu_change_dev_def_domain(); 3219 * mutex_unlock(&group->mutex); 3220 * device_unlock(dev); 3221 * 3222 * [1] Typical device release path 3223 * device_lock() from device/driver core code 3224 * -> bus_notifier() 3225 * -> iommu_bus_notifier() 3226 * -> iommu_release_device() 3227 * -> ops->release_device() vendor driver calls back iommu core code 3228 * -> mutex_lock() from iommu core code 3229 */ 3230 mutex_unlock(&group->mutex); 3231 3232 /* Check if the device in the group still has a driver bound to it */ 3233 device_lock(dev); 3234 if (device_is_bound(dev)) { 3235 pr_err_ratelimited("Device is still bound to driver\n"); 3236 ret = -EBUSY; 3237 goto out; 3238 } 3239 3240 ret = iommu_change_dev_def_domain(group, dev, req_type); 3241 ret = ret ?: count; 3242 3243 out: 3244 device_unlock(dev); 3245 put_device(dev); 3246 3247 return ret; 3248 } 3249