1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/device.h> 10 #include <linux/kernel.h> 11 #include <linux/bug.h> 12 #include <linux/types.h> 13 #include <linux/init.h> 14 #include <linux/export.h> 15 #include <linux/slab.h> 16 #include <linux/errno.h> 17 #include <linux/iommu.h> 18 #include <linux/idr.h> 19 #include <linux/notifier.h> 20 #include <linux/err.h> 21 #include <linux/pci.h> 22 #include <linux/bitops.h> 23 #include <linux/property.h> 24 #include <linux/fsl/mc.h> 25 #include <linux/module.h> 26 #include <trace/events/iommu.h> 27 28 static struct kset *iommu_group_kset; 29 static DEFINE_IDA(iommu_group_ida); 30 31 static unsigned int iommu_def_domain_type __read_mostly; 32 static bool iommu_dma_strict __read_mostly = true; 33 static u32 iommu_cmd_line __read_mostly; 34 35 struct iommu_group { 36 struct kobject kobj; 37 struct kobject *devices_kobj; 38 struct list_head devices; 39 struct mutex mutex; 40 struct blocking_notifier_head notifier; 41 void *iommu_data; 42 void (*iommu_data_release)(void *iommu_data); 43 char *name; 44 int id; 45 struct iommu_domain *default_domain; 46 struct iommu_domain *domain; 47 struct list_head entry; 48 }; 49 50 struct group_device { 51 struct list_head list; 52 struct device *dev; 53 char *name; 54 }; 55 56 struct iommu_group_attribute { 57 struct attribute attr; 58 ssize_t (*show)(struct iommu_group *group, char *buf); 59 ssize_t (*store)(struct iommu_group *group, 60 const char *buf, size_t count); 61 }; 62 63 static const char * const iommu_group_resv_type_string[] = { 64 [IOMMU_RESV_DIRECT] = "direct", 65 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 66 [IOMMU_RESV_RESERVED] = "reserved", 67 [IOMMU_RESV_MSI] = "msi", 68 [IOMMU_RESV_SW_MSI] = "msi", 69 }; 70 71 #define IOMMU_CMD_LINE_DMA_API BIT(0) 72 #define IOMMU_CMD_LINE_STRICT BIT(1) 73 74 static int iommu_alloc_default_domain(struct iommu_group *group, 75 struct device *dev); 76 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 77 unsigned type); 78 static int __iommu_attach_device(struct iommu_domain *domain, 79 struct device *dev); 80 static int __iommu_attach_group(struct iommu_domain *domain, 81 struct iommu_group *group); 82 static void __iommu_detach_group(struct iommu_domain *domain, 83 struct iommu_group *group); 84 static int iommu_create_device_direct_mappings(struct iommu_group *group, 85 struct device *dev); 86 static struct iommu_group *iommu_group_get_for_dev(struct device *dev); 87 static ssize_t iommu_group_store_type(struct iommu_group *group, 88 const char *buf, size_t count); 89 90 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 91 struct iommu_group_attribute iommu_group_attr_##_name = \ 92 __ATTR(_name, _mode, _show, _store) 93 94 #define to_iommu_group_attr(_attr) \ 95 container_of(_attr, struct iommu_group_attribute, attr) 96 #define to_iommu_group(_kobj) \ 97 container_of(_kobj, struct iommu_group, kobj) 98 99 static LIST_HEAD(iommu_device_list); 100 static DEFINE_SPINLOCK(iommu_device_lock); 101 102 /* 103 * Use a function instead of an array here because the domain-type is a 104 * bit-field, so an array would waste memory. 
 */
static const char *iommu_domain_type_str(unsigned int t)
{
        switch (t) {
        case IOMMU_DOMAIN_BLOCKED:
                return "Blocked";
        case IOMMU_DOMAIN_IDENTITY:
                return "Passthrough";
        case IOMMU_DOMAIN_UNMANAGED:
                return "Unmanaged";
        case IOMMU_DOMAIN_DMA:
                return "Translated";
        default:
                return "Unknown";
        }
}

static int __init iommu_subsys_init(void)
{
        if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
                if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
                        iommu_set_default_passthrough(false);
                else
                        iommu_set_default_translated(false);

                if (iommu_default_passthrough() && mem_encrypt_active()) {
                        pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
                        iommu_set_default_translated(false);
                }
        }

        pr_info("Default domain type: %s %s\n",
                iommu_domain_type_str(iommu_def_domain_type),
                (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
                        "(set via kernel command line)" : "");

        return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        if (param)
                return param;

        param = kzalloc(sizeof(*param), GFP_KERNEL);
        if (!param)
                return NULL;

        mutex_init(&param->lock);
        dev->iommu = param;
        return param;
}

static void dev_iommu_free(struct device *dev)
{
        iommu_fwspec_free(dev);
        kfree(dev->iommu);
        dev->iommu = NULL;
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_device *iommu_dev;
        struct iommu_group *group;
        int ret;

        if (!ops)
                return -ENODEV;

        if (!dev_iommu_get(dev))
                return -ENOMEM;

        if (!try_module_get(ops->owner)) {
                ret = -EINVAL;
                goto err_free;
        }

        iommu_dev = ops->probe_device(dev);
        if (IS_ERR(iommu_dev)) {
                ret = PTR_ERR(iommu_dev);
                goto out_module_put;
        }

        dev->iommu->iommu_dev = iommu_dev;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group)) {
                ret = PTR_ERR(group);
                goto out_release;
        }
        iommu_group_put(group);

        if (group_list && !group->default_domain && list_empty(&group->entry))
                list_add_tail(&group->entry, group_list);

        iommu_device_link(iommu_dev, dev);

        return 0;

out_release:
        ops->release_device(dev);

out_module_put:
        module_put(ops->owner);

err_free:
        dev_iommu_free(dev);

        return ret;
}

int iommu_probe_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        ret = __iommu_probe_device(dev, NULL);
        if (ret)
                goto err_out;

        group = iommu_group_get(dev);
        if (!group) {
                ret = -ENODEV;
                goto err_release;
        }

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver.
There are still some drivers which don't 256 * support default domains, so the return value is not yet 257 * checked. 258 */ 259 iommu_alloc_default_domain(group, dev); 260 261 if (group->default_domain) { 262 ret = __iommu_attach_device(group->default_domain, dev); 263 if (ret) { 264 iommu_group_put(group); 265 goto err_release; 266 } 267 } 268 269 iommu_create_device_direct_mappings(group, dev); 270 271 iommu_group_put(group); 272 273 if (ops->probe_finalize) 274 ops->probe_finalize(dev); 275 276 return 0; 277 278 err_release: 279 iommu_release_device(dev); 280 281 err_out: 282 return ret; 283 284 } 285 286 void iommu_release_device(struct device *dev) 287 { 288 const struct iommu_ops *ops = dev->bus->iommu_ops; 289 290 if (!dev->iommu) 291 return; 292 293 iommu_device_unlink(dev->iommu->iommu_dev, dev); 294 295 ops->release_device(dev); 296 297 iommu_group_remove_device(dev); 298 module_put(ops->owner); 299 dev_iommu_free(dev); 300 } 301 302 static int __init iommu_set_def_domain_type(char *str) 303 { 304 bool pt; 305 int ret; 306 307 ret = kstrtobool(str, &pt); 308 if (ret) 309 return ret; 310 311 if (pt) 312 iommu_set_default_passthrough(true); 313 else 314 iommu_set_default_translated(true); 315 316 return 0; 317 } 318 early_param("iommu.passthrough", iommu_set_def_domain_type); 319 320 static int __init iommu_dma_setup(char *str) 321 { 322 int ret = kstrtobool(str, &iommu_dma_strict); 323 324 if (!ret) 325 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT; 326 return ret; 327 } 328 early_param("iommu.strict", iommu_dma_setup); 329 330 void iommu_set_dma_strict(bool strict) 331 { 332 if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT)) 333 iommu_dma_strict = strict; 334 } 335 336 bool iommu_get_dma_strict(struct iommu_domain *domain) 337 { 338 /* only allow lazy flushing for DMA domains */ 339 if (domain->type == IOMMU_DOMAIN_DMA) 340 return iommu_dma_strict; 341 return true; 342 } 343 EXPORT_SYMBOL_GPL(iommu_get_dma_strict); 344 345 static ssize_t iommu_group_attr_show(struct kobject *kobj, 346 struct attribute *__attr, char *buf) 347 { 348 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 349 struct iommu_group *group = to_iommu_group(kobj); 350 ssize_t ret = -EIO; 351 352 if (attr->show) 353 ret = attr->show(group, buf); 354 return ret; 355 } 356 357 static ssize_t iommu_group_attr_store(struct kobject *kobj, 358 struct attribute *__attr, 359 const char *buf, size_t count) 360 { 361 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 362 struct iommu_group *group = to_iommu_group(kobj); 363 ssize_t ret = -EIO; 364 365 if (attr->store) 366 ret = attr->store(group, buf, count); 367 return ret; 368 } 369 370 static const struct sysfs_ops iommu_group_sysfs_ops = { 371 .show = iommu_group_attr_show, 372 .store = iommu_group_attr_store, 373 }; 374 375 static int iommu_group_create_file(struct iommu_group *group, 376 struct iommu_group_attribute *attr) 377 { 378 return sysfs_create_file(&group->kobj, &attr->attr); 379 } 380 381 static void iommu_group_remove_file(struct iommu_group *group, 382 struct iommu_group_attribute *attr) 383 { 384 sysfs_remove_file(&group->kobj, &attr->attr); 385 } 386 387 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 388 { 389 return sprintf(buf, "%s\n", group->name); 390 } 391 392 /** 393 * iommu_insert_resv_region - Insert a new region in the 394 * list of reserved regions. 
395 * @new: new region to insert 396 * @regions: list of regions 397 * 398 * Elements are sorted by start address and overlapping segments 399 * of the same type are merged. 400 */ 401 static int iommu_insert_resv_region(struct iommu_resv_region *new, 402 struct list_head *regions) 403 { 404 struct iommu_resv_region *iter, *tmp, *nr, *top; 405 LIST_HEAD(stack); 406 407 nr = iommu_alloc_resv_region(new->start, new->length, 408 new->prot, new->type); 409 if (!nr) 410 return -ENOMEM; 411 412 /* First add the new element based on start address sorting */ 413 list_for_each_entry(iter, regions, list) { 414 if (nr->start < iter->start || 415 (nr->start == iter->start && nr->type <= iter->type)) 416 break; 417 } 418 list_add_tail(&nr->list, &iter->list); 419 420 /* Merge overlapping segments of type nr->type in @regions, if any */ 421 list_for_each_entry_safe(iter, tmp, regions, list) { 422 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 423 424 /* no merge needed on elements of different types than @new */ 425 if (iter->type != new->type) { 426 list_move_tail(&iter->list, &stack); 427 continue; 428 } 429 430 /* look for the last stack element of same type as @iter */ 431 list_for_each_entry_reverse(top, &stack, list) 432 if (top->type == iter->type) 433 goto check_overlap; 434 435 list_move_tail(&iter->list, &stack); 436 continue; 437 438 check_overlap: 439 top_end = top->start + top->length - 1; 440 441 if (iter->start > top_end + 1) { 442 list_move_tail(&iter->list, &stack); 443 } else { 444 top->length = max(top_end, iter_end) - top->start + 1; 445 list_del(&iter->list); 446 kfree(iter); 447 } 448 } 449 list_splice(&stack, regions); 450 return 0; 451 } 452 453 static int 454 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 455 struct list_head *group_resv_regions) 456 { 457 struct iommu_resv_region *entry; 458 int ret = 0; 459 460 list_for_each_entry(entry, dev_resv_regions, list) { 461 ret = iommu_insert_resv_region(entry, group_resv_regions); 462 if (ret) 463 break; 464 } 465 return ret; 466 } 467 468 int iommu_get_group_resv_regions(struct iommu_group *group, 469 struct list_head *head) 470 { 471 struct group_device *device; 472 int ret = 0; 473 474 mutex_lock(&group->mutex); 475 list_for_each_entry(device, &group->devices, list) { 476 struct list_head dev_resv_regions; 477 478 INIT_LIST_HEAD(&dev_resv_regions); 479 iommu_get_resv_regions(device->dev, &dev_resv_regions); 480 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 481 iommu_put_resv_regions(device->dev, &dev_resv_regions); 482 if (ret) 483 break; 484 } 485 mutex_unlock(&group->mutex); 486 return ret; 487 } 488 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 489 490 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 491 char *buf) 492 { 493 struct iommu_resv_region *region, *next; 494 struct list_head group_resv_regions; 495 char *str = buf; 496 497 INIT_LIST_HEAD(&group_resv_regions); 498 iommu_get_group_resv_regions(group, &group_resv_regions); 499 500 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 501 str += sprintf(str, "0x%016llx 0x%016llx %s\n", 502 (long long int)region->start, 503 (long long int)(region->start + 504 region->length - 1), 505 iommu_group_resv_type_string[region->type]); 506 kfree(region); 507 } 508 509 return (str - buf); 510 } 511 512 static ssize_t iommu_group_show_type(struct iommu_group *group, 513 char *buf) 514 { 515 char *type = "unknown\n"; 516 517 mutex_lock(&group->mutex); 518 if (group->default_domain) { 
519 switch (group->default_domain->type) { 520 case IOMMU_DOMAIN_BLOCKED: 521 type = "blocked\n"; 522 break; 523 case IOMMU_DOMAIN_IDENTITY: 524 type = "identity\n"; 525 break; 526 case IOMMU_DOMAIN_UNMANAGED: 527 type = "unmanaged\n"; 528 break; 529 case IOMMU_DOMAIN_DMA: 530 type = "DMA\n"; 531 break; 532 } 533 } 534 mutex_unlock(&group->mutex); 535 strcpy(buf, type); 536 537 return strlen(type); 538 } 539 540 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 541 542 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 543 iommu_group_show_resv_regions, NULL); 544 545 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, 546 iommu_group_store_type); 547 548 static void iommu_group_release(struct kobject *kobj) 549 { 550 struct iommu_group *group = to_iommu_group(kobj); 551 552 pr_debug("Releasing group %d\n", group->id); 553 554 if (group->iommu_data_release) 555 group->iommu_data_release(group->iommu_data); 556 557 ida_simple_remove(&iommu_group_ida, group->id); 558 559 if (group->default_domain) 560 iommu_domain_free(group->default_domain); 561 562 kfree(group->name); 563 kfree(group); 564 } 565 566 static struct kobj_type iommu_group_ktype = { 567 .sysfs_ops = &iommu_group_sysfs_ops, 568 .release = iommu_group_release, 569 }; 570 571 /** 572 * iommu_group_alloc - Allocate a new group 573 * 574 * This function is called by an iommu driver to allocate a new iommu 575 * group. The iommu group represents the minimum granularity of the iommu. 576 * Upon successful return, the caller holds a reference to the supplied 577 * group in order to hold the group until devices are added. Use 578 * iommu_group_put() to release this extra reference count, allowing the 579 * group to be automatically reclaimed once it has no devices or external 580 * references. 581 */ 582 struct iommu_group *iommu_group_alloc(void) 583 { 584 struct iommu_group *group; 585 int ret; 586 587 group = kzalloc(sizeof(*group), GFP_KERNEL); 588 if (!group) 589 return ERR_PTR(-ENOMEM); 590 591 group->kobj.kset = iommu_group_kset; 592 mutex_init(&group->mutex); 593 INIT_LIST_HEAD(&group->devices); 594 INIT_LIST_HEAD(&group->entry); 595 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); 596 597 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); 598 if (ret < 0) { 599 kfree(group); 600 return ERR_PTR(ret); 601 } 602 group->id = ret; 603 604 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 605 NULL, "%d", group->id); 606 if (ret) { 607 ida_simple_remove(&iommu_group_ida, group->id); 608 kobject_put(&group->kobj); 609 return ERR_PTR(ret); 610 } 611 612 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 613 if (!group->devices_kobj) { 614 kobject_put(&group->kobj); /* triggers .release & free */ 615 return ERR_PTR(-ENOMEM); 616 } 617 618 /* 619 * The devices_kobj holds a reference on the group kobject, so 620 * as long as that exists so will the group. We can therefore 621 * use the devices_kobj for reference counting. 
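         * In practice this means that iommu_group_get(), iommu_group_ref_get()
         * and iommu_group_put() below operate on group->devices_kobj rather
         * than on the group kobject itself; the group is only released once
         * the devices_kobj and every such reference are gone.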
622 */ 623 kobject_put(&group->kobj); 624 625 ret = iommu_group_create_file(group, 626 &iommu_group_attr_reserved_regions); 627 if (ret) 628 return ERR_PTR(ret); 629 630 ret = iommu_group_create_file(group, &iommu_group_attr_type); 631 if (ret) 632 return ERR_PTR(ret); 633 634 pr_debug("Allocated group %d\n", group->id); 635 636 return group; 637 } 638 EXPORT_SYMBOL_GPL(iommu_group_alloc); 639 640 struct iommu_group *iommu_group_get_by_id(int id) 641 { 642 struct kobject *group_kobj; 643 struct iommu_group *group; 644 const char *name; 645 646 if (!iommu_group_kset) 647 return NULL; 648 649 name = kasprintf(GFP_KERNEL, "%d", id); 650 if (!name) 651 return NULL; 652 653 group_kobj = kset_find_obj(iommu_group_kset, name); 654 kfree(name); 655 656 if (!group_kobj) 657 return NULL; 658 659 group = container_of(group_kobj, struct iommu_group, kobj); 660 BUG_ON(group->id != id); 661 662 kobject_get(group->devices_kobj); 663 kobject_put(&group->kobj); 664 665 return group; 666 } 667 EXPORT_SYMBOL_GPL(iommu_group_get_by_id); 668 669 /** 670 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 671 * @group: the group 672 * 673 * iommu drivers can store data in the group for use when doing iommu 674 * operations. This function provides a way to retrieve it. Caller 675 * should hold a group reference. 676 */ 677 void *iommu_group_get_iommudata(struct iommu_group *group) 678 { 679 return group->iommu_data; 680 } 681 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 682 683 /** 684 * iommu_group_set_iommudata - set iommu_data for a group 685 * @group: the group 686 * @iommu_data: new data 687 * @release: release function for iommu_data 688 * 689 * iommu drivers can store data in the group for use when doing iommu 690 * operations. This function provides a way to set the data after 691 * the group has been allocated. Caller should hold a group reference. 692 */ 693 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 694 void (*release)(void *iommu_data)) 695 { 696 group->iommu_data = iommu_data; 697 group->iommu_data_release = release; 698 } 699 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 700 701 /** 702 * iommu_group_set_name - set name for a group 703 * @group: the group 704 * @name: name 705 * 706 * Allow iommu driver to set a name for a group. When set it will 707 * appear in a name attribute file under the group in sysfs. 
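 *
 * A minimal usage sketch (the group pointer and the "vfio-noiommu" name are
 * illustrative only, not taken from this file):
 *
 *      ret = iommu_group_set_name(group, "vfio-noiommu");
 *      if (ret)
 *              return ret;
 *
 * Calling it again with a NULL name removes a previously set name and its
 * sysfs attribute.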
708 */ 709 int iommu_group_set_name(struct iommu_group *group, const char *name) 710 { 711 int ret; 712 713 if (group->name) { 714 iommu_group_remove_file(group, &iommu_group_attr_name); 715 kfree(group->name); 716 group->name = NULL; 717 if (!name) 718 return 0; 719 } 720 721 group->name = kstrdup(name, GFP_KERNEL); 722 if (!group->name) 723 return -ENOMEM; 724 725 ret = iommu_group_create_file(group, &iommu_group_attr_name); 726 if (ret) { 727 kfree(group->name); 728 group->name = NULL; 729 return ret; 730 } 731 732 return 0; 733 } 734 EXPORT_SYMBOL_GPL(iommu_group_set_name); 735 736 static int iommu_create_device_direct_mappings(struct iommu_group *group, 737 struct device *dev) 738 { 739 struct iommu_domain *domain = group->default_domain; 740 struct iommu_resv_region *entry; 741 struct list_head mappings; 742 unsigned long pg_size; 743 int ret = 0; 744 745 if (!domain || domain->type != IOMMU_DOMAIN_DMA) 746 return 0; 747 748 BUG_ON(!domain->pgsize_bitmap); 749 750 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 751 INIT_LIST_HEAD(&mappings); 752 753 iommu_get_resv_regions(dev, &mappings); 754 755 /* We need to consider overlapping regions for different devices */ 756 list_for_each_entry(entry, &mappings, list) { 757 dma_addr_t start, end, addr; 758 size_t map_size = 0; 759 760 if (domain->ops->apply_resv_region) 761 domain->ops->apply_resv_region(dev, domain, entry); 762 763 start = ALIGN(entry->start, pg_size); 764 end = ALIGN(entry->start + entry->length, pg_size); 765 766 if (entry->type != IOMMU_RESV_DIRECT && 767 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 768 continue; 769 770 for (addr = start; addr <= end; addr += pg_size) { 771 phys_addr_t phys_addr; 772 773 if (addr == end) 774 goto map_end; 775 776 phys_addr = iommu_iova_to_phys(domain, addr); 777 if (!phys_addr) { 778 map_size += pg_size; 779 continue; 780 } 781 782 map_end: 783 if (map_size) { 784 ret = iommu_map(domain, addr - map_size, 785 addr - map_size, map_size, 786 entry->prot); 787 if (ret) 788 goto out; 789 map_size = 0; 790 } 791 } 792 793 } 794 795 iommu_flush_iotlb_all(domain); 796 797 out: 798 iommu_put_resv_regions(dev, &mappings); 799 800 return ret; 801 } 802 803 static bool iommu_is_attach_deferred(struct iommu_domain *domain, 804 struct device *dev) 805 { 806 if (domain->ops->is_attach_deferred) 807 return domain->ops->is_attach_deferred(domain, dev); 808 809 return false; 810 } 811 812 /** 813 * iommu_group_add_device - add a device to an iommu group 814 * @group: the group into which to add the device (reference should be held) 815 * @dev: the device 816 * 817 * This function is called by an iommu driver to add a device into a 818 * group. Adding a device increments the group reference count. 819 */ 820 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 821 { 822 int ret, i = 0; 823 struct group_device *device; 824 825 device = kzalloc(sizeof(*device), GFP_KERNEL); 826 if (!device) 827 return -ENOMEM; 828 829 device->dev = dev; 830 831 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 832 if (ret) 833 goto err_free_device; 834 835 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 836 rename: 837 if (!device->name) { 838 ret = -ENOMEM; 839 goto err_remove_link; 840 } 841 842 ret = sysfs_create_link_nowarn(group->devices_kobj, 843 &dev->kobj, device->name); 844 if (ret) { 845 if (ret == -EEXIST && i >= 0) { 846 /* 847 * Account for the slim chance of collision 848 * and append an instance to the name. 
849 */ 850 kfree(device->name); 851 device->name = kasprintf(GFP_KERNEL, "%s.%d", 852 kobject_name(&dev->kobj), i++); 853 goto rename; 854 } 855 goto err_free_name; 856 } 857 858 kobject_get(group->devices_kobj); 859 860 dev->iommu_group = group; 861 862 mutex_lock(&group->mutex); 863 list_add_tail(&device->list, &group->devices); 864 if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) 865 ret = __iommu_attach_device(group->domain, dev); 866 mutex_unlock(&group->mutex); 867 if (ret) 868 goto err_put_group; 869 870 /* Notify any listeners about change to group. */ 871 blocking_notifier_call_chain(&group->notifier, 872 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev); 873 874 trace_add_device_to_group(group->id, dev); 875 876 dev_info(dev, "Adding to iommu group %d\n", group->id); 877 878 return 0; 879 880 err_put_group: 881 mutex_lock(&group->mutex); 882 list_del(&device->list); 883 mutex_unlock(&group->mutex); 884 dev->iommu_group = NULL; 885 kobject_put(group->devices_kobj); 886 sysfs_remove_link(group->devices_kobj, device->name); 887 err_free_name: 888 kfree(device->name); 889 err_remove_link: 890 sysfs_remove_link(&dev->kobj, "iommu_group"); 891 err_free_device: 892 kfree(device); 893 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 894 return ret; 895 } 896 EXPORT_SYMBOL_GPL(iommu_group_add_device); 897 898 /** 899 * iommu_group_remove_device - remove a device from it's current group 900 * @dev: device to be removed 901 * 902 * This function is called by an iommu driver to remove the device from 903 * it's current group. This decrements the iommu group reference count. 904 */ 905 void iommu_group_remove_device(struct device *dev) 906 { 907 struct iommu_group *group = dev->iommu_group; 908 struct group_device *tmp_device, *device = NULL; 909 910 dev_info(dev, "Removing from iommu group %d\n", group->id); 911 912 /* Pre-notify listeners that a device is being removed. */ 913 blocking_notifier_call_chain(&group->notifier, 914 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev); 915 916 mutex_lock(&group->mutex); 917 list_for_each_entry(tmp_device, &group->devices, list) { 918 if (tmp_device->dev == dev) { 919 device = tmp_device; 920 list_del(&device->list); 921 break; 922 } 923 } 924 mutex_unlock(&group->mutex); 925 926 if (!device) 927 return; 928 929 sysfs_remove_link(group->devices_kobj, device->name); 930 sysfs_remove_link(&dev->kobj, "iommu_group"); 931 932 trace_remove_device_from_group(group->id, dev); 933 934 kfree(device->name); 935 kfree(device); 936 dev->iommu_group = NULL; 937 kobject_put(group->devices_kobj); 938 } 939 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 940 941 static int iommu_group_device_count(struct iommu_group *group) 942 { 943 struct group_device *entry; 944 int ret = 0; 945 946 list_for_each_entry(entry, &group->devices, list) 947 ret++; 948 949 return ret; 950 } 951 952 /** 953 * iommu_group_for_each_dev - iterate over each device in the group 954 * @group: the group 955 * @data: caller opaque data to be passed to callback function 956 * @fn: caller supplied callback function 957 * 958 * This function is called by group users to iterate over group devices. 959 * Callers should hold a reference count to the group during callback. 960 * The group->mutex is held across callbacks, which will block calls to 961 * iommu_group_add/remove_device. 
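 *
 * For illustration only (count_one() is a hypothetical callback, not part of
 * this API):
 *
 *      static int count_one(struct device *dev, void *data)
 *      {
 *              int *count = data;
 *
 *              (*count)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *
 *      iommu_group_for_each_dev(group, &count, count_one);
 *
 * A non-zero return value from the callback stops the iteration and is
 * propagated back to the caller.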
962 */ 963 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, 964 int (*fn)(struct device *, void *)) 965 { 966 struct group_device *device; 967 int ret = 0; 968 969 list_for_each_entry(device, &group->devices, list) { 970 ret = fn(device->dev, data); 971 if (ret) 972 break; 973 } 974 return ret; 975 } 976 977 978 int iommu_group_for_each_dev(struct iommu_group *group, void *data, 979 int (*fn)(struct device *, void *)) 980 { 981 int ret; 982 983 mutex_lock(&group->mutex); 984 ret = __iommu_group_for_each_dev(group, data, fn); 985 mutex_unlock(&group->mutex); 986 987 return ret; 988 } 989 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); 990 991 /** 992 * iommu_group_get - Return the group for a device and increment reference 993 * @dev: get the group that this device belongs to 994 * 995 * This function is called by iommu drivers and users to get the group 996 * for the specified device. If found, the group is returned and the group 997 * reference in incremented, else NULL. 998 */ 999 struct iommu_group *iommu_group_get(struct device *dev) 1000 { 1001 struct iommu_group *group = dev->iommu_group; 1002 1003 if (group) 1004 kobject_get(group->devices_kobj); 1005 1006 return group; 1007 } 1008 EXPORT_SYMBOL_GPL(iommu_group_get); 1009 1010 /** 1011 * iommu_group_ref_get - Increment reference on a group 1012 * @group: the group to use, must not be NULL 1013 * 1014 * This function is called by iommu drivers to take additional references on an 1015 * existing group. Returns the given group for convenience. 1016 */ 1017 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) 1018 { 1019 kobject_get(group->devices_kobj); 1020 return group; 1021 } 1022 EXPORT_SYMBOL_GPL(iommu_group_ref_get); 1023 1024 /** 1025 * iommu_group_put - Decrement group reference 1026 * @group: the group to use 1027 * 1028 * This function is called by iommu drivers and users to release the 1029 * iommu group. Once the reference count is zero, the group is released. 1030 */ 1031 void iommu_group_put(struct iommu_group *group) 1032 { 1033 if (group) 1034 kobject_put(group->devices_kobj); 1035 } 1036 EXPORT_SYMBOL_GPL(iommu_group_put); 1037 1038 /** 1039 * iommu_group_register_notifier - Register a notifier for group changes 1040 * @group: the group to watch 1041 * @nb: notifier block to signal 1042 * 1043 * This function allows iommu group users to track changes in a group. 1044 * See include/linux/iommu.h for actions sent via this notifier. Caller 1045 * should hold a reference to the group throughout notifier registration. 1046 */ 1047 int iommu_group_register_notifier(struct iommu_group *group, 1048 struct notifier_block *nb) 1049 { 1050 return blocking_notifier_chain_register(&group->notifier, nb); 1051 } 1052 EXPORT_SYMBOL_GPL(iommu_group_register_notifier); 1053 1054 /** 1055 * iommu_group_unregister_notifier - Unregister a notifier 1056 * @group: the group to watch 1057 * @nb: notifier block to signal 1058 * 1059 * Unregister a previously registered group notifier block. 
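 *
 * An illustrative register/unregister pair (my_group_notifier() and my_nb
 * are hypothetical, not defined in this file):
 *
 *      static int my_group_notifier(struct notifier_block *nb,
 *                                   unsigned long action, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *                      dev_info(dev, "added to the watched group\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_group_notifier,
 *      };
 *
 *      iommu_group_register_notifier(group, &my_nb);
 *      ...
 *      iommu_group_unregister_notifier(group, &my_nb);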
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);
        /* Only allow one fault handler registered for each device */
        if (param->fault_param) {
                ret = -EBUSY;
                goto done_unlock;
        }

        get_device(dev);
        param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
        if (!param->fault_param) {
                put_device(dev);
                ret = -ENOMEM;
                goto done_unlock;
        }
        param->fault_param->handler = handler;
        param->fault_param->data = data;
        mutex_init(&param->fault_param->lock);
        INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);

        if (!param->fault_param)
                goto unlock;

        /* we cannot unregister handler if there are pending faults */
        if (!list_empty(&param->fault_param->faults)) {
                ret = -EBUSY;
                goto unlock;
        }

        kfree(param->fault_param);
        param->fault_param = NULL;
        put_device(dev);
unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
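 *
 * A rough sketch of the expected call from an IOMMU driver's page-request
 * IRQ thread (the field values and the grpid/iova variables are illustrative
 * only):
 *
 *      struct iommu_fault_event evt = {
 *              .fault = {
 *                      .type = IOMMU_FAULT_PAGE_REQ,
 *                      .prm = {
 *                              .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
 *                              .grpid = grpid,
 *                              .addr  = iova,
 *                      },
 *              },
 *      };
 *
 *      ret = iommu_report_device_fault(dev, &evt);
 *
 * If this returns an error for a recoverable fault, the driver has to
 * complete the page request itself, as noted above.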
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
        struct dev_iommu *param = dev->iommu;
        struct iommu_fault_event *evt_pending = NULL;
        struct iommu_fault_param *fparam;
        int ret = 0;

        if (!param || !evt)
                return -EINVAL;

        /* we only report device fault if there is a handler registered */
        mutex_lock(&param->lock);
        fparam = param->fault_param;
        if (!fparam || !fparam->handler) {
                ret = -EINVAL;
                goto done_unlock;
        }

        if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
            (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
                evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
                                      GFP_KERNEL);
                if (!evt_pending) {
                        ret = -ENOMEM;
                        goto done_unlock;
                }
                mutex_lock(&fparam->lock);
                list_add_tail(&evt_pending->list, &fparam->faults);
                mutex_unlock(&fparam->lock);
        }

        ret = fparam->handler(&evt->fault, fparam->data);
        if (ret && evt_pending) {
                mutex_lock(&fparam->lock);
                list_del(&evt_pending->list);
                mutex_unlock(&fparam->lock);
                kfree(evt_pending);
        }
done_unlock:
        mutex_unlock(&param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
                        struct iommu_page_response *msg)
{
        bool needs_pasid;
        int ret = -EINVAL;
        struct iommu_fault_event *evt;
        struct iommu_fault_page_request *prm;
        struct dev_iommu *param = dev->iommu;
        bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain || !domain->ops->page_response)
                return -ENODEV;

        if (!param || !param->fault_param)
                return -EINVAL;

        if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
            msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
                return -EINVAL;

        /* Only send response if there is a fault report pending */
        mutex_lock(&param->fault_param->lock);
        if (list_empty(&param->fault_param->faults)) {
                dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
                goto done_unlock;
        }
        /*
         * Check if we have a matching page request pending to respond,
         * otherwise return -EINVAL
         */
        list_for_each_entry(evt, &param->fault_param->faults, list) {
                prm = &evt->fault.prm;
                if (prm->grpid != msg->grpid)
                        continue;

                /*
                 * If the PASID is required, the corresponding request is
                 * matched using the group ID, the PASID valid bit and the PASID
                 * value. Otherwise only the group ID matches request and
                 * response.
                 */
                needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
                if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
                        continue;

                if (!needs_pasid && has_pasid) {
                        /* No big deal, just clear it. */
                        msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
                        msg->pasid = 0;
                }

                ret = domain->ops->page_response(dev, evt, msg);
                list_del(&evt->list);
                kfree(evt);
                break;
        }

done_unlock:
        mutex_unlock(&param->fault_param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
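 *
 * The ID matches the group's directory name under /sys/kernel/iommu_groups/
 * and is, for example, what VFIO user space uses to open the corresponding
 * /dev/vfio/<id> character device.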
1284 */ 1285 int iommu_group_id(struct iommu_group *group) 1286 { 1287 return group->id; 1288 } 1289 EXPORT_SYMBOL_GPL(iommu_group_id); 1290 1291 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1292 unsigned long *devfns); 1293 1294 /* 1295 * To consider a PCI device isolated, we require ACS to support Source 1296 * Validation, Request Redirection, Completer Redirection, and Upstream 1297 * Forwarding. This effectively means that devices cannot spoof their 1298 * requester ID, requests and completions cannot be redirected, and all 1299 * transactions are forwarded upstream, even as it passes through a 1300 * bridge where the target device is downstream. 1301 */ 1302 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1303 1304 /* 1305 * For multifunction devices which are not isolated from each other, find 1306 * all the other non-isolated functions and look for existing groups. For 1307 * each function, we also need to look for aliases to or from other devices 1308 * that may already have a group. 1309 */ 1310 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1311 unsigned long *devfns) 1312 { 1313 struct pci_dev *tmp = NULL; 1314 struct iommu_group *group; 1315 1316 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1317 return NULL; 1318 1319 for_each_pci_dev(tmp) { 1320 if (tmp == pdev || tmp->bus != pdev->bus || 1321 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1322 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1323 continue; 1324 1325 group = get_pci_alias_group(tmp, devfns); 1326 if (group) { 1327 pci_dev_put(tmp); 1328 return group; 1329 } 1330 } 1331 1332 return NULL; 1333 } 1334 1335 /* 1336 * Look for aliases to or from the given device for existing groups. DMA 1337 * aliases are only supported on the same bus, therefore the search 1338 * space is quite small (especially since we're really only looking at pcie 1339 * device, and therefore only expect multiple slots on the root complex or 1340 * downstream switch ports). It's conceivable though that a pair of 1341 * multifunction devices could have aliases between them that would cause a 1342 * loop. To prevent this, we use a bitmap to track where we've been. 1343 */ 1344 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1345 unsigned long *devfns) 1346 { 1347 struct pci_dev *tmp = NULL; 1348 struct iommu_group *group; 1349 1350 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1351 return NULL; 1352 1353 group = iommu_group_get(&pdev->dev); 1354 if (group) 1355 return group; 1356 1357 for_each_pci_dev(tmp) { 1358 if (tmp == pdev || tmp->bus != pdev->bus) 1359 continue; 1360 1361 /* We alias them or they alias us */ 1362 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1363 group = get_pci_alias_group(tmp, devfns); 1364 if (group) { 1365 pci_dev_put(tmp); 1366 return group; 1367 } 1368 1369 group = get_pci_function_alias_group(tmp, devfns); 1370 if (group) { 1371 pci_dev_put(tmp); 1372 return group; 1373 } 1374 } 1375 } 1376 1377 return NULL; 1378 } 1379 1380 struct group_for_pci_data { 1381 struct pci_dev *pdev; 1382 struct iommu_group *group; 1383 }; 1384 1385 /* 1386 * DMA alias iterator callback, return the last seen device. Stop and return 1387 * the IOMMU group if we find one along the way. 
1388 */ 1389 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1390 { 1391 struct group_for_pci_data *data = opaque; 1392 1393 data->pdev = pdev; 1394 data->group = iommu_group_get(&pdev->dev); 1395 1396 return data->group != NULL; 1397 } 1398 1399 /* 1400 * Generic device_group call-back function. It just allocates one 1401 * iommu-group per device. 1402 */ 1403 struct iommu_group *generic_device_group(struct device *dev) 1404 { 1405 return iommu_group_alloc(); 1406 } 1407 EXPORT_SYMBOL_GPL(generic_device_group); 1408 1409 /* 1410 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1411 * to find or create an IOMMU group for a device. 1412 */ 1413 struct iommu_group *pci_device_group(struct device *dev) 1414 { 1415 struct pci_dev *pdev = to_pci_dev(dev); 1416 struct group_for_pci_data data; 1417 struct pci_bus *bus; 1418 struct iommu_group *group = NULL; 1419 u64 devfns[4] = { 0 }; 1420 1421 if (WARN_ON(!dev_is_pci(dev))) 1422 return ERR_PTR(-EINVAL); 1423 1424 /* 1425 * Find the upstream DMA alias for the device. A device must not 1426 * be aliased due to topology in order to have its own IOMMU group. 1427 * If we find an alias along the way that already belongs to a 1428 * group, use it. 1429 */ 1430 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1431 return data.group; 1432 1433 pdev = data.pdev; 1434 1435 /* 1436 * Continue upstream from the point of minimum IOMMU granularity 1437 * due to aliases to the point where devices are protected from 1438 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1439 * group, use it. 1440 */ 1441 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { 1442 if (!bus->self) 1443 continue; 1444 1445 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 1446 break; 1447 1448 pdev = bus->self; 1449 1450 group = iommu_group_get(&pdev->dev); 1451 if (group) 1452 return group; 1453 } 1454 1455 /* 1456 * Look for existing groups on device aliases. If we alias another 1457 * device or another device aliases us, use the same group. 1458 */ 1459 group = get_pci_alias_group(pdev, (unsigned long *)devfns); 1460 if (group) 1461 return group; 1462 1463 /* 1464 * Look for existing groups on non-isolated functions on the same 1465 * slot and aliases of those funcions, if any. No need to clear 1466 * the search bitmap, the tested devfns are still valid. 
1467 */ 1468 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1469 if (group) 1470 return group; 1471 1472 /* No shared group found, allocate new */ 1473 return iommu_group_alloc(); 1474 } 1475 EXPORT_SYMBOL_GPL(pci_device_group); 1476 1477 /* Get the IOMMU group for device on fsl-mc bus */ 1478 struct iommu_group *fsl_mc_device_group(struct device *dev) 1479 { 1480 struct device *cont_dev = fsl_mc_cont_dev(dev); 1481 struct iommu_group *group; 1482 1483 group = iommu_group_get(cont_dev); 1484 if (!group) 1485 group = iommu_group_alloc(); 1486 return group; 1487 } 1488 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1489 1490 static int iommu_get_def_domain_type(struct device *dev) 1491 { 1492 const struct iommu_ops *ops = dev->bus->iommu_ops; 1493 1494 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) 1495 return IOMMU_DOMAIN_DMA; 1496 1497 if (ops->def_domain_type) 1498 return ops->def_domain_type(dev); 1499 1500 return 0; 1501 } 1502 1503 static int iommu_group_alloc_default_domain(struct bus_type *bus, 1504 struct iommu_group *group, 1505 unsigned int type) 1506 { 1507 struct iommu_domain *dom; 1508 1509 dom = __iommu_domain_alloc(bus, type); 1510 if (!dom && type != IOMMU_DOMAIN_DMA) { 1511 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA); 1512 if (dom) 1513 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1514 type, group->name); 1515 } 1516 1517 if (!dom) 1518 return -ENOMEM; 1519 1520 group->default_domain = dom; 1521 if (!group->domain) 1522 group->domain = dom; 1523 return 0; 1524 } 1525 1526 static int iommu_alloc_default_domain(struct iommu_group *group, 1527 struct device *dev) 1528 { 1529 unsigned int type; 1530 1531 if (group->default_domain) 1532 return 0; 1533 1534 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type; 1535 1536 return iommu_group_alloc_default_domain(dev->bus, group, type); 1537 } 1538 1539 /** 1540 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1541 * @dev: target device 1542 * 1543 * This function is intended to be called by IOMMU drivers and extended to 1544 * support common, bus-defined algorithms when determining or creating the 1545 * IOMMU group for a device. On success, the caller will hold a reference 1546 * to the returned IOMMU group, which will already include the provided 1547 * device. The reference should be released with iommu_group_put(). 
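 *
 * The group itself comes from the bus's ops->device_group() callback; PCI
 * IOMMU drivers typically plug in the pci_device_group() helper above,
 * roughly like this (my_iommu_ops is an illustrative name, not from this
 * file):
 *
 *      static const struct iommu_ops my_iommu_ops = {
 *              ...
 *              .device_group   = pci_device_group,
 *      };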
1548 */ 1549 static struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1550 { 1551 const struct iommu_ops *ops = dev->bus->iommu_ops; 1552 struct iommu_group *group; 1553 int ret; 1554 1555 group = iommu_group_get(dev); 1556 if (group) 1557 return group; 1558 1559 if (!ops) 1560 return ERR_PTR(-EINVAL); 1561 1562 group = ops->device_group(dev); 1563 if (WARN_ON_ONCE(group == NULL)) 1564 return ERR_PTR(-EINVAL); 1565 1566 if (IS_ERR(group)) 1567 return group; 1568 1569 ret = iommu_group_add_device(group, dev); 1570 if (ret) 1571 goto out_put_group; 1572 1573 return group; 1574 1575 out_put_group: 1576 iommu_group_put(group); 1577 1578 return ERR_PTR(ret); 1579 } 1580 1581 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1582 { 1583 return group->default_domain; 1584 } 1585 1586 static int probe_iommu_group(struct device *dev, void *data) 1587 { 1588 struct list_head *group_list = data; 1589 struct iommu_group *group; 1590 int ret; 1591 1592 /* Device is probed already if in a group */ 1593 group = iommu_group_get(dev); 1594 if (group) { 1595 iommu_group_put(group); 1596 return 0; 1597 } 1598 1599 ret = __iommu_probe_device(dev, group_list); 1600 if (ret == -ENODEV) 1601 ret = 0; 1602 1603 return ret; 1604 } 1605 1606 static int remove_iommu_group(struct device *dev, void *data) 1607 { 1608 iommu_release_device(dev); 1609 1610 return 0; 1611 } 1612 1613 static int iommu_bus_notifier(struct notifier_block *nb, 1614 unsigned long action, void *data) 1615 { 1616 unsigned long group_action = 0; 1617 struct device *dev = data; 1618 struct iommu_group *group; 1619 1620 /* 1621 * ADD/DEL call into iommu driver ops if provided, which may 1622 * result in ADD/DEL notifiers to group->notifier 1623 */ 1624 if (action == BUS_NOTIFY_ADD_DEVICE) { 1625 int ret; 1626 1627 ret = iommu_probe_device(dev); 1628 return (ret) ? 
NOTIFY_DONE : NOTIFY_OK;
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                iommu_release_device(dev);
                return NOTIFY_OK;
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

struct __group_domain_type {
        struct device *dev;
        unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
        struct __group_domain_type *gtype = data;
        unsigned int type = iommu_get_def_domain_type(dev);

        if (type) {
                if (gtype->type && gtype->type != type) {
                        dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
                                 iommu_domain_type_str(type),
                                 dev_name(gtype->dev),
                                 iommu_domain_type_str(gtype->type));
                        gtype->type = 0;
                }

                if (!gtype->dev) {
                        gtype->dev = dev;
                        gtype->type = type;
                }
        }

        return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
                                       struct iommu_group *group)
{
        struct __group_domain_type gtype;

        memset(&gtype, 0, sizeof(gtype));

        /* Ask for default domain requirements of all devices in the group */
        __iommu_group_for_each_dev(group, &gtype,
                                   probe_get_default_domain_type);

        if (!gtype.type)
                gtype.type = iommu_def_domain_type;

        iommu_group_alloc_default_domain(bus, group, gtype.type);

}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;
        int ret = 0;

        if (!iommu_is_attach_deferred(domain, dev))
                ret = __iommu_attach_device(domain, dev);

        return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
        return __iommu_group_for_each_dev(group, group->default_domain,
                                          iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        if (domain->ops->probe_finalize)
                domain->ops->probe_finalize(dev);

        return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
        __iommu_group_for_each_dev(group, group->default_domain,
                                   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
        struct iommu_group *group = data;

        iommu_create_device_direct_mappings(group, dev);

        return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
        return __iommu_group_for_each_dev(group, group,
                                          iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
        struct iommu_group
*group, *next; 1762 LIST_HEAD(group_list); 1763 int ret; 1764 1765 /* 1766 * This code-path does not allocate the default domain when 1767 * creating the iommu group, so do it after the groups are 1768 * created. 1769 */ 1770 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group); 1771 if (ret) 1772 return ret; 1773 1774 list_for_each_entry_safe(group, next, &group_list, entry) { 1775 /* Remove item from the list */ 1776 list_del_init(&group->entry); 1777 1778 mutex_lock(&group->mutex); 1779 1780 /* Try to allocate default domain */ 1781 probe_alloc_default_domain(bus, group); 1782 1783 if (!group->default_domain) { 1784 mutex_unlock(&group->mutex); 1785 continue; 1786 } 1787 1788 iommu_group_create_direct_mappings(group); 1789 1790 ret = __iommu_group_dma_attach(group); 1791 1792 mutex_unlock(&group->mutex); 1793 1794 if (ret) 1795 break; 1796 1797 __iommu_group_dma_finalize(group); 1798 } 1799 1800 return ret; 1801 } 1802 1803 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) 1804 { 1805 struct notifier_block *nb; 1806 int err; 1807 1808 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 1809 if (!nb) 1810 return -ENOMEM; 1811 1812 nb->notifier_call = iommu_bus_notifier; 1813 1814 err = bus_register_notifier(bus, nb); 1815 if (err) 1816 goto out_free; 1817 1818 err = bus_iommu_probe(bus); 1819 if (err) 1820 goto out_err; 1821 1822 1823 return 0; 1824 1825 out_err: 1826 /* Clean up */ 1827 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group); 1828 bus_unregister_notifier(bus, nb); 1829 1830 out_free: 1831 kfree(nb); 1832 1833 return err; 1834 } 1835 1836 /** 1837 * bus_set_iommu - set iommu-callbacks for the bus 1838 * @bus: bus. 1839 * @ops: the callbacks provided by the iommu-driver 1840 * 1841 * This function is called by an iommu driver to set the iommu methods 1842 * used for a particular bus. Drivers for devices on that bus can use 1843 * the iommu-api after these ops are registered. 1844 * This special function is needed because IOMMUs are usually devices on 1845 * the bus itself, so the iommu drivers are not initialized when the bus 1846 * is set up. With this function the iommu-driver can set the iommu-ops 1847 * afterwards. 1848 */ 1849 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) 1850 { 1851 int err; 1852 1853 if (ops == NULL) { 1854 bus->iommu_ops = NULL; 1855 return 0; 1856 } 1857 1858 if (bus->iommu_ops != NULL) 1859 return -EBUSY; 1860 1861 bus->iommu_ops = ops; 1862 1863 /* Do IOMMU specific setup for this bus-type */ 1864 err = iommu_bus_init(bus, ops); 1865 if (err) 1866 bus->iommu_ops = NULL; 1867 1868 return err; 1869 } 1870 EXPORT_SYMBOL_GPL(bus_set_iommu); 1871 1872 bool iommu_present(struct bus_type *bus) 1873 { 1874 return bus->iommu_ops != NULL; 1875 } 1876 EXPORT_SYMBOL_GPL(iommu_present); 1877 1878 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 1879 { 1880 if (!bus->iommu_ops || !bus->iommu_ops->capable) 1881 return false; 1882 1883 return bus->iommu_ops->capable(cap); 1884 } 1885 EXPORT_SYMBOL_GPL(iommu_capable); 1886 1887 /** 1888 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1889 * @domain: iommu domain 1890 * @handler: fault handler 1891 * @token: user data, will be passed back to the fault handler 1892 * 1893 * This function should be used by IOMMU users which want to be notified 1894 * whenever an IOMMU fault happens. 1895 * 1896 * The fault handler itself should return 0 on success, and an appropriate 1897 * error code otherwise. 
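 *
 * A minimal illustrative handler (the name and the logging are hypothetical;
 * the signature is that of iommu_fault_handler_t):
 *
 *      static int my_fault_handler(struct iommu_domain *domain,
 *                                  struct device *dev, unsigned long iova,
 *                                  int flags, void *token)
 *      {
 *              dev_err(dev, "unhandled %s fault at IOVA %#lx\n",
 *                      (flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
 *              return -ENOSYS;
 *      }
 *
 *      iommu_set_fault_handler(domain, my_fault_handler, NULL);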
1898 */ 1899 void iommu_set_fault_handler(struct iommu_domain *domain, 1900 iommu_fault_handler_t handler, 1901 void *token) 1902 { 1903 BUG_ON(!domain); 1904 1905 domain->handler = handler; 1906 domain->handler_token = token; 1907 } 1908 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1909 1910 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 1911 unsigned type) 1912 { 1913 struct iommu_domain *domain; 1914 1915 if (bus == NULL || bus->iommu_ops == NULL) 1916 return NULL; 1917 1918 domain = bus->iommu_ops->domain_alloc(type); 1919 if (!domain) 1920 return NULL; 1921 1922 domain->ops = bus->iommu_ops; 1923 domain->type = type; 1924 /* Assume all sizes by default; the driver may override this later */ 1925 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 1926 1927 return domain; 1928 } 1929 1930 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 1931 { 1932 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 1933 } 1934 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 1935 1936 void iommu_domain_free(struct iommu_domain *domain) 1937 { 1938 domain->ops->domain_free(domain); 1939 } 1940 EXPORT_SYMBOL_GPL(iommu_domain_free); 1941 1942 static int __iommu_attach_device(struct iommu_domain *domain, 1943 struct device *dev) 1944 { 1945 int ret; 1946 1947 if (unlikely(domain->ops->attach_dev == NULL)) 1948 return -ENODEV; 1949 1950 ret = domain->ops->attach_dev(domain, dev); 1951 if (!ret) 1952 trace_attach_device_to_domain(dev); 1953 return ret; 1954 } 1955 1956 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 1957 { 1958 struct iommu_group *group; 1959 int ret; 1960 1961 group = iommu_group_get(dev); 1962 if (!group) 1963 return -ENODEV; 1964 1965 /* 1966 * Lock the group to make sure the device-count doesn't 1967 * change while we are attaching 1968 */ 1969 mutex_lock(&group->mutex); 1970 ret = -EINVAL; 1971 if (iommu_group_device_count(group) != 1) 1972 goto out_unlock; 1973 1974 ret = __iommu_attach_group(domain, group); 1975 1976 out_unlock: 1977 mutex_unlock(&group->mutex); 1978 iommu_group_put(group); 1979 1980 return ret; 1981 } 1982 EXPORT_SYMBOL_GPL(iommu_attach_device); 1983 1984 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 1985 { 1986 const struct iommu_ops *ops = domain->ops; 1987 1988 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) 1989 return __iommu_attach_device(domain, dev); 1990 1991 return 0; 1992 } 1993 1994 /* 1995 * Check flags and other user provided data for valid combinations. We also 1996 * make sure no reserved fields or unused flags are set. This is to ensure 1997 * not breaking userspace in the future when these fields or flags are used. 
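 *
 * For instance (field values purely illustrative), an IOTLB invalidation by
 * address range is only accepted when the user-provided data looks like:
 *
 *      inv_info.version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
 *      inv_info.cache       = IOMMU_CACHE_INV_TYPE_IOTLB;
 *      inv_info.granularity = IOMMU_INV_GRANU_ADDR;
 *      inv_info.granu.addr_info.flags = IOMMU_INV_ADDR_FLAGS_LEAF;
 *
 * with all other flag bits and every padding byte left at zero (argsz itself
 * is validated by the caller, iommu_uapi_cache_invalidate()).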
1998 */ 1999 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) 2000 { 2001 u32 mask; 2002 int i; 2003 2004 if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) 2005 return -EINVAL; 2006 2007 mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; 2008 if (info->cache & ~mask) 2009 return -EINVAL; 2010 2011 if (info->granularity >= IOMMU_INV_GRANU_NR) 2012 return -EINVAL; 2013 2014 switch (info->granularity) { 2015 case IOMMU_INV_GRANU_ADDR: 2016 if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) 2017 return -EINVAL; 2018 2019 mask = IOMMU_INV_ADDR_FLAGS_PASID | 2020 IOMMU_INV_ADDR_FLAGS_ARCHID | 2021 IOMMU_INV_ADDR_FLAGS_LEAF; 2022 2023 if (info->granu.addr_info.flags & ~mask) 2024 return -EINVAL; 2025 break; 2026 case IOMMU_INV_GRANU_PASID: 2027 mask = IOMMU_INV_PASID_FLAGS_PASID | 2028 IOMMU_INV_PASID_FLAGS_ARCHID; 2029 if (info->granu.pasid_info.flags & ~mask) 2030 return -EINVAL; 2031 2032 break; 2033 case IOMMU_INV_GRANU_DOMAIN: 2034 if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) 2035 return -EINVAL; 2036 break; 2037 default: 2038 return -EINVAL; 2039 } 2040 2041 /* Check reserved padding fields */ 2042 for (i = 0; i < sizeof(info->padding); i++) { 2043 if (info->padding[i]) 2044 return -EINVAL; 2045 } 2046 2047 return 0; 2048 } 2049 2050 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, 2051 void __user *uinfo) 2052 { 2053 struct iommu_cache_invalidate_info inv_info = { 0 }; 2054 u32 minsz; 2055 int ret; 2056 2057 if (unlikely(!domain->ops->cache_invalidate)) 2058 return -ENODEV; 2059 2060 /* 2061 * No new spaces can be added before the variable sized union, the 2062 * minimum size is the offset to the union. 2063 */ 2064 minsz = offsetof(struct iommu_cache_invalidate_info, granu); 2065 2066 /* Copy minsz from user to get flags and argsz */ 2067 if (copy_from_user(&inv_info, uinfo, minsz)) 2068 return -EFAULT; 2069 2070 /* Fields before the variable size union are mandatory */ 2071 if (inv_info.argsz < minsz) 2072 return -EINVAL; 2073 2074 /* PASID and address granu require additional info beyond minsz */ 2075 if (inv_info.granularity == IOMMU_INV_GRANU_PASID && 2076 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) 2077 return -EINVAL; 2078 2079 if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && 2080 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) 2081 return -EINVAL; 2082 2083 /* 2084 * User might be using a newer UAPI header which has a larger data 2085 * size, we shall support the existing flags within the current 2086 * size. Copy the remaining user data _after_ minsz but not more 2087 * than the current kernel supported size. 
2088 */ 2089 if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, 2090 min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) 2091 return -EFAULT; 2092 2093 /* Now the argsz is validated, check the content */ 2094 ret = iommu_check_cache_invl_data(&inv_info); 2095 if (ret) 2096 return ret; 2097 2098 return domain->ops->cache_invalidate(domain, dev, &inv_info); 2099 } 2100 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); 2101 2102 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) 2103 { 2104 u64 mask; 2105 int i; 2106 2107 if (data->version != IOMMU_GPASID_BIND_VERSION_1) 2108 return -EINVAL; 2109 2110 /* Check the range of supported formats */ 2111 if (data->format >= IOMMU_PASID_FORMAT_LAST) 2112 return -EINVAL; 2113 2114 /* Check all flags */ 2115 mask = IOMMU_SVA_GPASID_VAL; 2116 if (data->flags & ~mask) 2117 return -EINVAL; 2118 2119 /* Check reserved padding fields */ 2120 for (i = 0; i < sizeof(data->padding); i++) { 2121 if (data->padding[i]) 2122 return -EINVAL; 2123 } 2124 2125 return 0; 2126 } 2127 2128 static int iommu_sva_prepare_bind_data(void __user *udata, 2129 struct iommu_gpasid_bind_data *data) 2130 { 2131 u32 minsz; 2132 2133 /* 2134 * No new spaces can be added before the variable sized union, the 2135 * minimum size is the offset to the union. 2136 */ 2137 minsz = offsetof(struct iommu_gpasid_bind_data, vendor); 2138 2139 /* Copy minsz from user to get flags and argsz */ 2140 if (copy_from_user(data, udata, minsz)) 2141 return -EFAULT; 2142 2143 /* Fields before the variable size union are mandatory */ 2144 if (data->argsz < minsz) 2145 return -EINVAL; 2146 /* 2147 * User might be using a newer UAPI header, we shall let IOMMU vendor 2148 * driver decide on what size it needs. Since the guest PASID bind data 2149 * can be vendor specific, larger argsz could be the result of extension 2150 * for one vendor but it should not affect another vendor. 
 * Copy the remaining user data _after_ minsz
2152 	 */
2153 	if (copy_from_user((void *)data + minsz, udata + minsz,
2154 			   min_t(u32, data->argsz, sizeof(*data)) - minsz))
2155 		return -EFAULT;
2156 
2157 	return iommu_check_bind_data(data);
2158 }
2159 
2160 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
2161 			       void __user *udata)
2162 {
2163 	struct iommu_gpasid_bind_data data = { 0 };
2164 	int ret;
2165 
2166 	if (unlikely(!domain->ops->sva_bind_gpasid))
2167 		return -ENODEV;
2168 
2169 	ret = iommu_sva_prepare_bind_data(udata, &data);
2170 	if (ret)
2171 		return ret;
2172 
2173 	return domain->ops->sva_bind_gpasid(domain, dev, &data);
2174 }
2175 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
2176 
2177 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2178 			    ioasid_t pasid)
2179 {
2180 	if (unlikely(!domain->ops->sva_unbind_gpasid))
2181 		return -ENODEV;
2182 
2183 	return domain->ops->sva_unbind_gpasid(dev, pasid);
2184 }
2185 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2186 
2187 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2188 				 void __user *udata)
2189 {
2190 	struct iommu_gpasid_bind_data data = { 0 };
2191 	int ret;
2192 
2193 	if (unlikely(!domain->ops->sva_bind_gpasid))
2194 		return -ENODEV;
2195 
2196 	ret = iommu_sva_prepare_bind_data(udata, &data);
2197 	if (ret)
2198 		return ret;
2199 
2200 	return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2201 }
2202 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
2203 
2204 static void __iommu_detach_device(struct iommu_domain *domain,
2205 				  struct device *dev)
2206 {
2207 	if (iommu_is_attach_deferred(domain, dev))
2208 		return;
2209 
2210 	if (unlikely(domain->ops->detach_dev == NULL))
2211 		return;
2212 
2213 	domain->ops->detach_dev(domain, dev);
2214 	trace_detach_device_from_domain(dev);
2215 }
2216 
2217 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2218 {
2219 	struct iommu_group *group;
2220 
2221 	group = iommu_group_get(dev);
2222 	if (!group)
2223 		return;
2224 
2225 	mutex_lock(&group->mutex);
2226 	if (iommu_group_device_count(group) != 1) {
2227 		WARN_ON(1);
2228 		goto out_unlock;
2229 	}
2230 
2231 	__iommu_detach_group(domain, group);
2232 
2233 out_unlock:
2234 	mutex_unlock(&group->mutex);
2235 	iommu_group_put(group);
2236 }
2237 EXPORT_SYMBOL_GPL(iommu_detach_device);
2238 
2239 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2240 {
2241 	struct iommu_domain *domain;
2242 	struct iommu_group *group;
2243 
2244 	group = iommu_group_get(dev);
2245 	if (!group)
2246 		return NULL;
2247 
2248 	domain = group->domain;
2249 
2250 	iommu_group_put(group);
2251 
2252 	return domain;
2253 }
2254 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2255 
2256 /*
2257  * For use by IOMMU_DOMAIN_DMA implementations, which already provide their
2258  * own guarantees that the group and its default domain are valid and correct.
2259  */
2260 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2261 {
2262 	return dev->iommu_group->default_domain;
2263 }
2264 
2265 /*
2266  * IOMMU groups are really the natural working unit of the IOMMU, but
2267  * the IOMMU API works on domains and devices. Bridge that gap by
2268  * iterating over the devices in a group. Ideally we'd have a single
2269  * device which represents the requestor ID of the group, but we also
2270  * allow IOMMU drivers to create policy defined minimum sets, where
2271  * the physical hardware may be able to distinguish members, but we
2272  * wish to group them at a higher level (ex.
untrusted multi-function 2273 * PCI devices). Thus we attach each device. 2274 */ 2275 static int iommu_group_do_attach_device(struct device *dev, void *data) 2276 { 2277 struct iommu_domain *domain = data; 2278 2279 return __iommu_attach_device(domain, dev); 2280 } 2281 2282 static int __iommu_attach_group(struct iommu_domain *domain, 2283 struct iommu_group *group) 2284 { 2285 int ret; 2286 2287 if (group->default_domain && group->domain != group->default_domain) 2288 return -EBUSY; 2289 2290 ret = __iommu_group_for_each_dev(group, domain, 2291 iommu_group_do_attach_device); 2292 if (ret == 0) 2293 group->domain = domain; 2294 2295 return ret; 2296 } 2297 2298 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 2299 { 2300 int ret; 2301 2302 mutex_lock(&group->mutex); 2303 ret = __iommu_attach_group(domain, group); 2304 mutex_unlock(&group->mutex); 2305 2306 return ret; 2307 } 2308 EXPORT_SYMBOL_GPL(iommu_attach_group); 2309 2310 static int iommu_group_do_detach_device(struct device *dev, void *data) 2311 { 2312 struct iommu_domain *domain = data; 2313 2314 __iommu_detach_device(domain, dev); 2315 2316 return 0; 2317 } 2318 2319 static void __iommu_detach_group(struct iommu_domain *domain, 2320 struct iommu_group *group) 2321 { 2322 int ret; 2323 2324 if (!group->default_domain) { 2325 __iommu_group_for_each_dev(group, domain, 2326 iommu_group_do_detach_device); 2327 group->domain = NULL; 2328 return; 2329 } 2330 2331 if (group->domain == group->default_domain) 2332 return; 2333 2334 /* Detach by re-attaching to the default domain */ 2335 ret = __iommu_group_for_each_dev(group, group->default_domain, 2336 iommu_group_do_attach_device); 2337 if (ret != 0) 2338 WARN_ON(1); 2339 else 2340 group->domain = group->default_domain; 2341 } 2342 2343 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 2344 { 2345 mutex_lock(&group->mutex); 2346 __iommu_detach_group(domain, group); 2347 mutex_unlock(&group->mutex); 2348 } 2349 EXPORT_SYMBOL_GPL(iommu_detach_group); 2350 2351 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 2352 { 2353 if (unlikely(domain->ops->iova_to_phys == NULL)) 2354 return 0; 2355 2356 return domain->ops->iova_to_phys(domain, iova); 2357 } 2358 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2359 2360 static size_t iommu_pgsize(struct iommu_domain *domain, 2361 unsigned long addr_merge, size_t size) 2362 { 2363 unsigned int pgsize_idx; 2364 size_t pgsize; 2365 2366 /* Max page size that still fits into 'size' */ 2367 pgsize_idx = __fls(size); 2368 2369 /* need to consider alignment requirements ? 
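	 *
	 * A worked example with illustrative values: for pgsize_bitmap =
	 * SZ_4K | SZ_2M | SZ_1G, size = 0x400000 and addr_merge = 0x200000,
	 * __fls(size) = 22 and __ffs(addr_merge) = 21, so the candidate mask
	 * (1UL << 22) - 1 intersected with the bitmap leaves 2M | 4K and the
	 * biggest remaining page size, 2M, is picked. Had addr_merge been
	 * 0x201000 instead, __ffs() = 12 would have capped the result at 4K.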
*/ 2370 if (likely(addr_merge)) { 2371 /* Max page size allowed by address */ 2372 unsigned int align_pgsize_idx = __ffs(addr_merge); 2373 pgsize_idx = min(pgsize_idx, align_pgsize_idx); 2374 } 2375 2376 /* build a mask of acceptable page sizes */ 2377 pgsize = (1UL << (pgsize_idx + 1)) - 1; 2378 2379 /* throw away page sizes not supported by the hardware */ 2380 pgsize &= domain->pgsize_bitmap; 2381 2382 /* make sure we're still sane */ 2383 BUG_ON(!pgsize); 2384 2385 /* pick the biggest page */ 2386 pgsize_idx = __fls(pgsize); 2387 pgsize = 1UL << pgsize_idx; 2388 2389 return pgsize; 2390 } 2391 2392 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2393 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2394 { 2395 const struct iommu_ops *ops = domain->ops; 2396 unsigned long orig_iova = iova; 2397 unsigned int min_pagesz; 2398 size_t orig_size = size; 2399 phys_addr_t orig_paddr = paddr; 2400 int ret = 0; 2401 2402 if (unlikely(ops->map == NULL || 2403 domain->pgsize_bitmap == 0UL)) 2404 return -ENODEV; 2405 2406 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2407 return -EINVAL; 2408 2409 /* find out the minimum page size supported */ 2410 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2411 2412 /* 2413 * both the virtual address and the physical one, as well as 2414 * the size of the mapping, must be aligned (at least) to the 2415 * size of the smallest page supported by the hardware 2416 */ 2417 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2418 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2419 iova, &paddr, size, min_pagesz); 2420 return -EINVAL; 2421 } 2422 2423 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2424 2425 while (size) { 2426 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); 2427 2428 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", 2429 iova, &paddr, pgsize); 2430 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2431 2432 if (ret) 2433 break; 2434 2435 iova += pgsize; 2436 paddr += pgsize; 2437 size -= pgsize; 2438 } 2439 2440 /* unroll mapping in case something went wrong */ 2441 if (ret) 2442 iommu_unmap(domain, orig_iova, orig_size - size); 2443 else 2444 trace_map(orig_iova, orig_paddr, orig_size); 2445 2446 return ret; 2447 } 2448 2449 static int _iommu_map(struct iommu_domain *domain, unsigned long iova, 2450 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2451 { 2452 const struct iommu_ops *ops = domain->ops; 2453 int ret; 2454 2455 ret = __iommu_map(domain, iova, paddr, size, prot, gfp); 2456 if (ret == 0 && ops->iotlb_sync_map) 2457 ops->iotlb_sync_map(domain, iova, size); 2458 2459 return ret; 2460 } 2461 2462 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2463 phys_addr_t paddr, size_t size, int prot) 2464 { 2465 might_sleep(); 2466 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2467 } 2468 EXPORT_SYMBOL_GPL(iommu_map); 2469 2470 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2471 phys_addr_t paddr, size_t size, int prot) 2472 { 2473 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2474 } 2475 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2476 2477 static size_t __iommu_unmap(struct iommu_domain *domain, 2478 unsigned long iova, size_t size, 2479 struct iommu_iotlb_gather *iotlb_gather) 2480 { 2481 const struct iommu_ops *ops = domain->ops; 2482 size_t unmapped_page, unmapped = 0; 2483 unsigned long orig_iova = iova; 2484 unsigned int min_pagesz; 2485 2486 if (unlikely(ops->unmap == NULL 
|| 2487 domain->pgsize_bitmap == 0UL)) 2488 return 0; 2489 2490 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2491 return 0; 2492 2493 /* find out the minimum page size supported */ 2494 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2495 2496 /* 2497 * The virtual address, as well as the size of the mapping, must be 2498 * aligned (at least) to the size of the smallest page supported 2499 * by the hardware 2500 */ 2501 if (!IS_ALIGNED(iova | size, min_pagesz)) { 2502 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", 2503 iova, size, min_pagesz); 2504 return 0; 2505 } 2506 2507 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2508 2509 /* 2510 * Keep iterating until we either unmap 'size' bytes (or more) 2511 * or we hit an area that isn't mapped. 2512 */ 2513 while (unmapped < size) { 2514 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); 2515 2516 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather); 2517 if (!unmapped_page) 2518 break; 2519 2520 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2521 iova, unmapped_page); 2522 2523 iova += unmapped_page; 2524 unmapped += unmapped_page; 2525 } 2526 2527 trace_unmap(orig_iova, size, unmapped); 2528 return unmapped; 2529 } 2530 2531 size_t iommu_unmap(struct iommu_domain *domain, 2532 unsigned long iova, size_t size) 2533 { 2534 struct iommu_iotlb_gather iotlb_gather; 2535 size_t ret; 2536 2537 iommu_iotlb_gather_init(&iotlb_gather); 2538 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); 2539 iommu_iotlb_sync(domain, &iotlb_gather); 2540 2541 return ret; 2542 } 2543 EXPORT_SYMBOL_GPL(iommu_unmap); 2544 2545 size_t iommu_unmap_fast(struct iommu_domain *domain, 2546 unsigned long iova, size_t size, 2547 struct iommu_iotlb_gather *iotlb_gather) 2548 { 2549 return __iommu_unmap(domain, iova, size, iotlb_gather); 2550 } 2551 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2552 2553 static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2554 struct scatterlist *sg, unsigned int nents, int prot, 2555 gfp_t gfp) 2556 { 2557 const struct iommu_ops *ops = domain->ops; 2558 size_t len = 0, mapped = 0; 2559 phys_addr_t start; 2560 unsigned int i = 0; 2561 int ret; 2562 2563 while (i <= nents) { 2564 phys_addr_t s_phys = sg_phys(sg); 2565 2566 if (len && s_phys != start + len) { 2567 ret = __iommu_map(domain, iova + mapped, start, 2568 len, prot, gfp); 2569 2570 if (ret) 2571 goto out_err; 2572 2573 mapped += len; 2574 len = 0; 2575 } 2576 2577 if (len) { 2578 len += sg->length; 2579 } else { 2580 len = sg->length; 2581 start = s_phys; 2582 } 2583 2584 if (++i < nents) 2585 sg = sg_next(sg); 2586 } 2587 2588 if (ops->iotlb_sync_map) 2589 ops->iotlb_sync_map(domain, iova, mapped); 2590 return mapped; 2591 2592 out_err: 2593 /* undo mappings already done */ 2594 iommu_unmap(domain, iova, mapped); 2595 2596 return 0; 2597 2598 } 2599 2600 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2601 struct scatterlist *sg, unsigned int nents, int prot) 2602 { 2603 might_sleep(); 2604 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); 2605 } 2606 EXPORT_SYMBOL_GPL(iommu_map_sg); 2607 2608 size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2609 struct scatterlist *sg, unsigned int nents, int prot) 2610 { 2611 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); 2612 } 2613 2614 /** 2615 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework 2616 * @domain: the iommu domain where the fault has happened 2617 
* @dev: the device where the fault has happened 2618 * @iova: the faulting address 2619 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) 2620 * 2621 * This function should be called by the low-level IOMMU implementations 2622 * whenever IOMMU faults happen, to allow high-level users, that are 2623 * interested in such events, to know about them. 2624 * 2625 * This event may be useful for several possible use cases: 2626 * - mere logging of the event 2627 * - dynamic TLB/PTE loading 2628 * - if restarting of the faulting device is required 2629 * 2630 * Returns 0 on success and an appropriate error code otherwise (if dynamic 2631 * PTE/TLB loading will one day be supported, implementations will be able 2632 * to tell whether it succeeded or not according to this return value). 2633 * 2634 * Specifically, -ENOSYS is returned if a fault handler isn't installed 2635 * (though fault handlers can also return -ENOSYS, in case they want to 2636 * elicit the default behavior of the IOMMU drivers). 2637 */ 2638 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 2639 unsigned long iova, int flags) 2640 { 2641 int ret = -ENOSYS; 2642 2643 /* 2644 * if upper layers showed interest and installed a fault handler, 2645 * invoke it. 2646 */ 2647 if (domain->handler) 2648 ret = domain->handler(domain, dev, iova, flags, 2649 domain->handler_token); 2650 2651 trace_io_page_fault(dev, iova, flags); 2652 return ret; 2653 } 2654 EXPORT_SYMBOL_GPL(report_iommu_fault); 2655 2656 static int __init iommu_init(void) 2657 { 2658 iommu_group_kset = kset_create_and_add("iommu_groups", 2659 NULL, kernel_kobj); 2660 BUG_ON(!iommu_group_kset); 2661 2662 iommu_debugfs_setup(); 2663 2664 return 0; 2665 } 2666 core_initcall(iommu_init); 2667 2668 int iommu_domain_get_attr(struct iommu_domain *domain, 2669 enum iommu_attr attr, void *data) 2670 { 2671 if (!domain->ops->domain_get_attr) 2672 return -EINVAL; 2673 return domain->ops->domain_get_attr(domain, attr, data); 2674 } 2675 EXPORT_SYMBOL_GPL(iommu_domain_get_attr); 2676 2677 int iommu_domain_set_attr(struct iommu_domain *domain, 2678 enum iommu_attr attr, void *data) 2679 { 2680 int ret = 0; 2681 2682 switch (attr) { 2683 default: 2684 if (domain->ops->domain_set_attr == NULL) 2685 return -EINVAL; 2686 2687 ret = domain->ops->domain_set_attr(domain, attr, data); 2688 } 2689 2690 return ret; 2691 } 2692 EXPORT_SYMBOL_GPL(iommu_domain_set_attr); 2693 2694 int iommu_enable_nesting(struct iommu_domain *domain) 2695 { 2696 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2697 return -EINVAL; 2698 if (!domain->ops->enable_nesting) 2699 return -EINVAL; 2700 return domain->ops->enable_nesting(domain); 2701 } 2702 EXPORT_SYMBOL_GPL(iommu_enable_nesting); 2703 2704 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 2705 unsigned long quirk) 2706 { 2707 if (domain->type != IOMMU_DOMAIN_UNMANAGED) 2708 return -EINVAL; 2709 if (!domain->ops->set_pgtable_quirks) 2710 return -EINVAL; 2711 return domain->ops->set_pgtable_quirks(domain, quirk); 2712 } 2713 EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); 2714 2715 void iommu_get_resv_regions(struct device *dev, struct list_head *list) 2716 { 2717 const struct iommu_ops *ops = dev->bus->iommu_ops; 2718 2719 if (ops && ops->get_resv_regions) 2720 ops->get_resv_regions(dev, list); 2721 } 2722 2723 void iommu_put_resv_regions(struct device *dev, struct list_head *list) 2724 { 2725 const struct iommu_ops *ops = dev->bus->iommu_ops; 2726 2727 if (ops && ops->put_resv_regions) 2728 
ops->put_resv_regions(dev, list); 2729 } 2730 2731 /** 2732 * generic_iommu_put_resv_regions - Reserved region driver helper 2733 * @dev: device for which to free reserved regions 2734 * @list: reserved region list for device 2735 * 2736 * IOMMU drivers can use this to implement their .put_resv_regions() callback 2737 * for simple reservations. Memory allocated for each reserved region will be 2738 * freed. If an IOMMU driver allocates additional resources per region, it is 2739 * going to have to implement a custom callback. 2740 */ 2741 void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list) 2742 { 2743 struct iommu_resv_region *entry, *next; 2744 2745 list_for_each_entry_safe(entry, next, list, list) 2746 kfree(entry); 2747 } 2748 EXPORT_SYMBOL(generic_iommu_put_resv_regions); 2749 2750 struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 2751 size_t length, int prot, 2752 enum iommu_resv_type type) 2753 { 2754 struct iommu_resv_region *region; 2755 2756 region = kzalloc(sizeof(*region), GFP_KERNEL); 2757 if (!region) 2758 return NULL; 2759 2760 INIT_LIST_HEAD(®ion->list); 2761 region->start = start; 2762 region->length = length; 2763 region->prot = prot; 2764 region->type = type; 2765 return region; 2766 } 2767 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region); 2768 2769 void iommu_set_default_passthrough(bool cmd_line) 2770 { 2771 if (cmd_line) 2772 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2773 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2774 } 2775 2776 void iommu_set_default_translated(bool cmd_line) 2777 { 2778 if (cmd_line) 2779 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 2780 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2781 } 2782 2783 bool iommu_default_passthrough(void) 2784 { 2785 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2786 } 2787 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2788 2789 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2790 { 2791 const struct iommu_ops *ops = NULL; 2792 struct iommu_device *iommu; 2793 2794 spin_lock(&iommu_device_lock); 2795 list_for_each_entry(iommu, &iommu_device_list, list) 2796 if (iommu->fwnode == fwnode) { 2797 ops = iommu->ops; 2798 break; 2799 } 2800 spin_unlock(&iommu_device_lock); 2801 return ops; 2802 } 2803 2804 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2805 const struct iommu_ops *ops) 2806 { 2807 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2808 2809 if (fwspec) 2810 return ops == fwspec->ops ? 
0 : -EINVAL; 2811 2812 if (!dev_iommu_get(dev)) 2813 return -ENOMEM; 2814 2815 /* Preallocate for the overwhelmingly common case of 1 ID */ 2816 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2817 if (!fwspec) 2818 return -ENOMEM; 2819 2820 of_node_get(to_of_node(iommu_fwnode)); 2821 fwspec->iommu_fwnode = iommu_fwnode; 2822 fwspec->ops = ops; 2823 dev_iommu_fwspec_set(dev, fwspec); 2824 return 0; 2825 } 2826 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2827 2828 void iommu_fwspec_free(struct device *dev) 2829 { 2830 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2831 2832 if (fwspec) { 2833 fwnode_handle_put(fwspec->iommu_fwnode); 2834 kfree(fwspec); 2835 dev_iommu_fwspec_set(dev, NULL); 2836 } 2837 } 2838 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2839 2840 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2841 { 2842 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2843 int i, new_num; 2844 2845 if (!fwspec) 2846 return -EINVAL; 2847 2848 new_num = fwspec->num_ids + num_ids; 2849 if (new_num > 1) { 2850 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2851 GFP_KERNEL); 2852 if (!fwspec) 2853 return -ENOMEM; 2854 2855 dev_iommu_fwspec_set(dev, fwspec); 2856 } 2857 2858 for (i = 0; i < num_ids; i++) 2859 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2860 2861 fwspec->num_ids = new_num; 2862 return 0; 2863 } 2864 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2865 2866 /* 2867 * Per device IOMMU features. 2868 */ 2869 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2870 { 2871 if (dev->iommu && dev->iommu->iommu_dev) { 2872 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2873 2874 if (ops->dev_enable_feat) 2875 return ops->dev_enable_feat(dev, feat); 2876 } 2877 2878 return -ENODEV; 2879 } 2880 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2881 2882 /* 2883 * The device drivers should do the necessary cleanups before calling this. 2884 * For example, before disabling the aux-domain feature, the device driver 2885 * should detach all aux-domains. Otherwise, this will return -EBUSY. 2886 */ 2887 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2888 { 2889 if (dev->iommu && dev->iommu->iommu_dev) { 2890 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2891 2892 if (ops->dev_disable_feat) 2893 return ops->dev_disable_feat(dev, feat); 2894 } 2895 2896 return -EBUSY; 2897 } 2898 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2899 2900 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 2901 { 2902 if (dev->iommu && dev->iommu->iommu_dev) { 2903 const struct iommu_ops *ops = dev->iommu->iommu_dev->ops; 2904 2905 if (ops->dev_feat_enabled) 2906 return ops->dev_feat_enabled(dev, feat); 2907 } 2908 2909 return false; 2910 } 2911 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); 2912 2913 /* 2914 * Aux-domain specific attach/detach. 2915 * 2916 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns 2917 * true. Also, as long as domains are attached to a device through this 2918 * interface, any tries to call iommu_attach_device() should fail 2919 * (iommu_detach_device() can't fail, so we fail when trying to re-attach). 2920 * This should make us safe against a device being attached to a guest as a 2921 * whole while there are still pasid users on it (aux and sva). 
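 *
 * A minimal usage sketch (illustrative only; error handling and the
 * driver's own bookkeeping are omitted):
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX))
 *		return -ENODEV;
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain || iommu_aux_attach_device(domain, dev))
 *		goto err;
 *	pasid = iommu_aux_get_pasid(domain, dev);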
2922 */ 2923 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 2924 { 2925 int ret = -ENODEV; 2926 2927 if (domain->ops->aux_attach_dev) 2928 ret = domain->ops->aux_attach_dev(domain, dev); 2929 2930 if (!ret) 2931 trace_attach_device_to_domain(dev); 2932 2933 return ret; 2934 } 2935 EXPORT_SYMBOL_GPL(iommu_aux_attach_device); 2936 2937 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 2938 { 2939 if (domain->ops->aux_detach_dev) { 2940 domain->ops->aux_detach_dev(domain, dev); 2941 trace_detach_device_from_domain(dev); 2942 } 2943 } 2944 EXPORT_SYMBOL_GPL(iommu_aux_detach_device); 2945 2946 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 2947 { 2948 int ret = -ENODEV; 2949 2950 if (domain->ops->aux_get_pasid) 2951 ret = domain->ops->aux_get_pasid(domain, dev); 2952 2953 return ret; 2954 } 2955 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); 2956 2957 /** 2958 * iommu_sva_bind_device() - Bind a process address space to a device 2959 * @dev: the device 2960 * @mm: the mm to bind, caller must hold a reference to it 2961 * 2962 * Create a bond between device and address space, allowing the device to access 2963 * the mm using the returned PASID. If a bond already exists between @device and 2964 * @mm, it is returned and an additional reference is taken. Caller must call 2965 * iommu_sva_unbind_device() to release each reference. 2966 * 2967 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to 2968 * initialize the required SVA features. 2969 * 2970 * On error, returns an ERR_PTR value. 2971 */ 2972 struct iommu_sva * 2973 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 2974 { 2975 struct iommu_group *group; 2976 struct iommu_sva *handle = ERR_PTR(-EINVAL); 2977 const struct iommu_ops *ops = dev->bus->iommu_ops; 2978 2979 if (!ops || !ops->sva_bind) 2980 return ERR_PTR(-ENODEV); 2981 2982 group = iommu_group_get(dev); 2983 if (!group) 2984 return ERR_PTR(-ENODEV); 2985 2986 /* Ensure device count and domain don't change while we're binding */ 2987 mutex_lock(&group->mutex); 2988 2989 /* 2990 * To keep things simple, SVA currently doesn't support IOMMU groups 2991 * with more than one device. Existing SVA-capable systems are not 2992 * affected by the problems that required IOMMU groups (lack of ACS 2993 * isolation, device ID aliasing and other hardware issues). 2994 */ 2995 if (iommu_group_device_count(group) != 1) 2996 goto out_unlock; 2997 2998 handle = ops->sva_bind(dev, mm, drvdata); 2999 3000 out_unlock: 3001 mutex_unlock(&group->mutex); 3002 iommu_group_put(group); 3003 3004 return handle; 3005 } 3006 EXPORT_SYMBOL_GPL(iommu_sva_bind_device); 3007 3008 /** 3009 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device 3010 * @handle: the handle returned by iommu_sva_bind_device() 3011 * 3012 * Put reference to a bond between device and address space. The device should 3013 * not be issuing any more transaction for this PASID. All outstanding page 3014 * requests for this PASID must have been flushed to the IOMMU. 
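 *
 * A minimal bind/unbind sketch (illustrative only, error handling trimmed),
 * assuming IOMMU_DEV_FEAT_SVA has already been enabled for the device:
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program the PASID into the device, run the workload, quiesce DMA ...
 *	iommu_sva_unbind_device(handle);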
3015 */ 3016 void iommu_sva_unbind_device(struct iommu_sva *handle) 3017 { 3018 struct iommu_group *group; 3019 struct device *dev = handle->dev; 3020 const struct iommu_ops *ops = dev->bus->iommu_ops; 3021 3022 if (!ops || !ops->sva_unbind) 3023 return; 3024 3025 group = iommu_group_get(dev); 3026 if (!group) 3027 return; 3028 3029 mutex_lock(&group->mutex); 3030 ops->sva_unbind(handle); 3031 mutex_unlock(&group->mutex); 3032 3033 iommu_group_put(group); 3034 } 3035 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); 3036 3037 u32 iommu_sva_get_pasid(struct iommu_sva *handle) 3038 { 3039 const struct iommu_ops *ops = handle->dev->bus->iommu_ops; 3040 3041 if (!ops || !ops->sva_get_pasid) 3042 return IOMMU_PASID_INVALID; 3043 3044 return ops->sva_get_pasid(handle); 3045 } 3046 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); 3047 3048 /* 3049 * Changes the default domain of an iommu group that has *only* one device 3050 * 3051 * @group: The group for which the default domain should be changed 3052 * @prev_dev: The device in the group (this is used to make sure that the device 3053 * hasn't changed after the caller has called this function) 3054 * @type: The type of the new default domain that gets associated with the group 3055 * 3056 * Returns 0 on success and error code on failure 3057 * 3058 * Note: 3059 * 1. Presently, this function is called only when user requests to change the 3060 * group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type 3061 * Please take a closer look if intended to use for other purposes. 3062 */ 3063 static int iommu_change_dev_def_domain(struct iommu_group *group, 3064 struct device *prev_dev, int type) 3065 { 3066 struct iommu_domain *prev_dom; 3067 struct group_device *grp_dev; 3068 int ret, dev_def_dom; 3069 struct device *dev; 3070 3071 if (!group) 3072 return -EINVAL; 3073 3074 mutex_lock(&group->mutex); 3075 3076 if (group->default_domain != group->domain) { 3077 dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); 3078 ret = -EBUSY; 3079 goto out; 3080 } 3081 3082 /* 3083 * iommu group wasn't locked while acquiring device lock in 3084 * iommu_group_store_type(). So, make sure that the device count hasn't 3085 * changed while acquiring device lock. 3086 * 3087 * Changing default domain of an iommu group with two or more devices 3088 * isn't supported because there could be a potential deadlock. Consider 3089 * the following scenario. T1 is trying to acquire device locks of all 3090 * the devices in the group and before it could acquire all of them, 3091 * there could be another thread T2 (from different sub-system and use 3092 * case) that has already acquired some of the device locks and might be 3093 * waiting for T1 to release other device locks. 
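 *
 * Purely as an illustration, with devices A and B in the same group:
 *
 *	T1: device_lock(A); ...waits for device_lock(B)
 *	T2: device_lock(B); ...waits for device_lock(A)
 *
 * Neither thread can make progress, hence the single-device restriction.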
3094  */
3095 	if (iommu_group_device_count(group) != 1) {
3096 		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
3097 		ret = -EINVAL;
3098 		goto out;
3099 	}
3100 
3101 	/* Since group has only one device */
3102 	grp_dev = list_first_entry(&group->devices, struct group_device, list);
3103 	dev = grp_dev->dev;
3104 
3105 	if (prev_dev != dev) {
3106 		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
3107 		ret = -EBUSY;
3108 		goto out;
3109 	}
3110 
3111 	prev_dom = group->default_domain;
3112 	if (!prev_dom) {
3113 		ret = -EINVAL;
3114 		goto out;
3115 	}
3116 
3117 	dev_def_dom = iommu_get_def_domain_type(dev);
3118 	if (!type) {
3119 		/*
3120 		 * If the user hasn't requested any specific type of domain and
3121 		 * if the device supports both the domains, then default to the
3122 		 * domain the device was booted with
3123 		 */
3124 		type = dev_def_dom ? : iommu_def_domain_type;
3125 	} else if (dev_def_dom && type != dev_def_dom) {
3126 		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
3127 				    iommu_domain_type_str(type));
3128 		ret = -EINVAL;
3129 		goto out;
3130 	}
3131 
3132 	/*
3133 	 * Switch to a new domain only if the requested domain type is different
3134 	 * from the existing default domain type
3135 	 */
3136 	if (prev_dom->type == type) {
3137 		ret = 0;
3138 		goto out;
3139 	}
3140 
3141 	/* Sets group->default_domain to the newly allocated domain */
3142 	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
3143 	if (ret)
3144 		goto out;
3145 
3146 	ret = iommu_create_device_direct_mappings(group, dev);
3147 	if (ret)
3148 		goto free_new_domain;
3149 
3150 	ret = __iommu_attach_device(group->default_domain, dev);
3151 	if (ret)
3152 		goto free_new_domain;
3153 
3154 	group->domain = group->default_domain;
3155 
3156 	/*
3157 	 * Release the mutex here because ops->probe_finalize() call-back of
3158 	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
3159 	 * in-turn might call back into IOMMU core code, where it tries to take
3160 	 * group->mutex, resulting in a deadlock.
3161 	 */
3162 	mutex_unlock(&group->mutex);
3163 
3164 	/* Make sure dma_ops is appropriately set */
3165 	iommu_group_do_probe_finalize(dev, group->default_domain);
3166 	iommu_domain_free(prev_dom);
3167 	return 0;
3168 
3169 free_new_domain:
3170 	iommu_domain_free(group->default_domain);
3171 	group->default_domain = prev_dom;
3172 	group->domain = prev_dom;
3173 
3174 out:
3175 	mutex_unlock(&group->mutex);
3176 
3177 	return ret;
3178 }
3179 
3180 /*
3181  * Changing the default domain through sysfs requires the users to unbind the
3182  * drivers from the devices in the iommu group. Return failure if this
3183  * precondition is not met.
3184  *
3185  * We need to consider the race between this and the device release path.
3186  * device_lock(dev) is used here to guarantee that the device release path
3187  * will not be entered at the same time.
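 *
 * A typical sequence from userspace looks roughly like this (the group
 * number and device are illustrative; "identity", "DMA" and "auto" are the
 * accepted values):
 *
 *	# echo <device> > /sys/bus/pci/drivers/<driver>/unbind
 *	# echo identity > /sys/kernel/iommu_groups/3/type
 *	# echo <device> > /sys/bus/pci/drivers/<driver>/bind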
3188 */ 3189 static ssize_t iommu_group_store_type(struct iommu_group *group, 3190 const char *buf, size_t count) 3191 { 3192 struct group_device *grp_dev; 3193 struct device *dev; 3194 int ret, req_type; 3195 3196 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3197 return -EACCES; 3198 3199 if (WARN_ON(!group)) 3200 return -EINVAL; 3201 3202 if (sysfs_streq(buf, "identity")) 3203 req_type = IOMMU_DOMAIN_IDENTITY; 3204 else if (sysfs_streq(buf, "DMA")) 3205 req_type = IOMMU_DOMAIN_DMA; 3206 else if (sysfs_streq(buf, "auto")) 3207 req_type = 0; 3208 else 3209 return -EINVAL; 3210 3211 /* 3212 * Lock/Unlock the group mutex here before device lock to 3213 * 1. Make sure that the iommu group has only one device (this is a 3214 * prerequisite for step 2) 3215 * 2. Get struct *dev which is needed to lock device 3216 */ 3217 mutex_lock(&group->mutex); 3218 if (iommu_group_device_count(group) != 1) { 3219 mutex_unlock(&group->mutex); 3220 pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); 3221 return -EINVAL; 3222 } 3223 3224 /* Since group has only one device */ 3225 grp_dev = list_first_entry(&group->devices, struct group_device, list); 3226 dev = grp_dev->dev; 3227 get_device(dev); 3228 3229 /* 3230 * Don't hold the group mutex because taking group mutex first and then 3231 * the device lock could potentially cause a deadlock as below. Assume 3232 * two threads T1 and T2. T1 is trying to change default domain of an 3233 * iommu group and T2 is trying to hot unplug a device or release [1] VF 3234 * of a PCIe device which is in the same iommu group. T1 takes group 3235 * mutex and before it could take device lock assume T2 has taken device 3236 * lock and is yet to take group mutex. Now, both the threads will be 3237 * waiting for the other thread to release lock. Below, lock order was 3238 * suggested. 3239 * device_lock(dev); 3240 * mutex_lock(&group->mutex); 3241 * iommu_change_dev_def_domain(); 3242 * mutex_unlock(&group->mutex); 3243 * device_unlock(dev); 3244 * 3245 * [1] Typical device release path 3246 * device_lock() from device/driver core code 3247 * -> bus_notifier() 3248 * -> iommu_bus_notifier() 3249 * -> iommu_release_device() 3250 * -> ops->release_device() vendor driver calls back iommu core code 3251 * -> mutex_lock() from iommu core code 3252 */ 3253 mutex_unlock(&group->mutex); 3254 3255 /* Check if the device in the group still has a driver bound to it */ 3256 device_lock(dev); 3257 if (device_is_bound(dev)) { 3258 pr_err_ratelimited("Device is still bound to driver\n"); 3259 ret = -EBUSY; 3260 goto out; 3261 } 3262 3263 ret = iommu_change_dev_def_domain(group, dev, req_type); 3264 ret = ret ?: count; 3265 3266 out: 3267 device_unlock(dev); 3268 put_device(dev); 3269 3270 return ret; 3271 } 3272