1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 */ 6 7 #define pr_fmt(fmt) "iommu: " fmt 8 9 #include <linux/device.h> 10 #include <linux/kernel.h> 11 #include <linux/bug.h> 12 #include <linux/types.h> 13 #include <linux/init.h> 14 #include <linux/export.h> 15 #include <linux/slab.h> 16 #include <linux/errno.h> 17 #include <linux/iommu.h> 18 #include <linux/idr.h> 19 #include <linux/notifier.h> 20 #include <linux/err.h> 21 #include <linux/pci.h> 22 #include <linux/bitops.h> 23 #include <linux/property.h> 24 #include <linux/fsl/mc.h> 25 #include <linux/module.h> 26 #include <trace/events/iommu.h> 27 28 static struct kset *iommu_group_kset; 29 static DEFINE_IDA(iommu_group_ida); 30 31 static unsigned int iommu_def_domain_type __read_mostly; 32 static bool iommu_dma_strict __read_mostly = true; 33 static u32 iommu_cmd_line __read_mostly; 34 35 struct iommu_group { 36 struct kobject kobj; 37 struct kobject *devices_kobj; 38 struct list_head devices; 39 struct mutex mutex; 40 struct blocking_notifier_head notifier; 41 void *iommu_data; 42 void (*iommu_data_release)(void *iommu_data); 43 char *name; 44 int id; 45 struct iommu_domain *default_domain; 46 struct iommu_domain *domain; 47 }; 48 49 struct group_device { 50 struct list_head list; 51 struct device *dev; 52 char *name; 53 }; 54 55 struct iommu_group_attribute { 56 struct attribute attr; 57 ssize_t (*show)(struct iommu_group *group, char *buf); 58 ssize_t (*store)(struct iommu_group *group, 59 const char *buf, size_t count); 60 }; 61 62 static const char * const iommu_group_resv_type_string[] = { 63 [IOMMU_RESV_DIRECT] = "direct", 64 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable", 65 [IOMMU_RESV_RESERVED] = "reserved", 66 [IOMMU_RESV_MSI] = "msi", 67 [IOMMU_RESV_SW_MSI] = "msi", 68 }; 69 70 #define IOMMU_CMD_LINE_DMA_API BIT(0) 71 72 static void iommu_set_cmd_line_dma_api(void) 73 { 74 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API; 75 } 76 77 static bool iommu_cmd_line_dma_api(void) 78 { 79 return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API); 80 } 81 82 static int iommu_alloc_default_domain(struct device *dev); 83 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 84 unsigned type); 85 static int __iommu_attach_device(struct iommu_domain *domain, 86 struct device *dev); 87 static int __iommu_attach_group(struct iommu_domain *domain, 88 struct iommu_group *group); 89 static void __iommu_detach_group(struct iommu_domain *domain, 90 struct iommu_group *group); 91 92 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 93 struct iommu_group_attribute iommu_group_attr_##_name = \ 94 __ATTR(_name, _mode, _show, _store) 95 96 #define to_iommu_group_attr(_attr) \ 97 container_of(_attr, struct iommu_group_attribute, attr) 98 #define to_iommu_group(_kobj) \ 99 container_of(_kobj, struct iommu_group, kobj) 100 101 static LIST_HEAD(iommu_device_list); 102 static DEFINE_SPINLOCK(iommu_device_lock); 103 104 /* 105 * Use a function instead of an array here because the domain-type is a 106 * bit-field, so an array would waste memory. 
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	kfree(dev->iommu);
	dev->iommu = NULL;
}

static int __iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev))
		return PTR_ERR(iommu_dev);

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret;

	WARN_ON(dev->iommu_group);
	if (!ops)
		return -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free_dev_param;
	}

	if (ops->probe_device) {
		struct iommu_group *group;

		ret = __iommu_probe_device(dev);

		/*
		 * Try to allocate a default domain - needs support from the
		 * IOMMU driver. There are still some drivers which don't
		 * support default domains, so the return value is not yet
		 * checked.
244 */ 245 if (!ret) 246 iommu_alloc_default_domain(dev); 247 248 group = iommu_group_get(dev); 249 if (group && group->default_domain) { 250 ret = __iommu_attach_device(group->default_domain, dev); 251 iommu_group_put(group); 252 } 253 254 } else { 255 ret = ops->add_device(dev); 256 } 257 258 if (ret) 259 goto err_module_put; 260 261 if (ops->probe_finalize) 262 ops->probe_finalize(dev); 263 264 return 0; 265 266 err_module_put: 267 module_put(ops->owner); 268 err_free_dev_param: 269 dev_iommu_free(dev); 270 return ret; 271 } 272 273 static void __iommu_release_device(struct device *dev) 274 { 275 const struct iommu_ops *ops = dev->bus->iommu_ops; 276 277 iommu_device_unlink(dev->iommu->iommu_dev, dev); 278 279 iommu_group_remove_device(dev); 280 281 ops->release_device(dev); 282 } 283 284 void iommu_release_device(struct device *dev) 285 { 286 const struct iommu_ops *ops = dev->bus->iommu_ops; 287 288 if (!dev->iommu) 289 return; 290 291 if (ops->release_device) 292 __iommu_release_device(dev); 293 else if (dev->iommu_group) 294 ops->remove_device(dev); 295 296 module_put(ops->owner); 297 dev_iommu_free(dev); 298 } 299 300 static int __init iommu_set_def_domain_type(char *str) 301 { 302 bool pt; 303 int ret; 304 305 ret = kstrtobool(str, &pt); 306 if (ret) 307 return ret; 308 309 if (pt) 310 iommu_set_default_passthrough(true); 311 else 312 iommu_set_default_translated(true); 313 314 return 0; 315 } 316 early_param("iommu.passthrough", iommu_set_def_domain_type); 317 318 static int __init iommu_dma_setup(char *str) 319 { 320 return kstrtobool(str, &iommu_dma_strict); 321 } 322 early_param("iommu.strict", iommu_dma_setup); 323 324 static ssize_t iommu_group_attr_show(struct kobject *kobj, 325 struct attribute *__attr, char *buf) 326 { 327 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 328 struct iommu_group *group = to_iommu_group(kobj); 329 ssize_t ret = -EIO; 330 331 if (attr->show) 332 ret = attr->show(group, buf); 333 return ret; 334 } 335 336 static ssize_t iommu_group_attr_store(struct kobject *kobj, 337 struct attribute *__attr, 338 const char *buf, size_t count) 339 { 340 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr); 341 struct iommu_group *group = to_iommu_group(kobj); 342 ssize_t ret = -EIO; 343 344 if (attr->store) 345 ret = attr->store(group, buf, count); 346 return ret; 347 } 348 349 static const struct sysfs_ops iommu_group_sysfs_ops = { 350 .show = iommu_group_attr_show, 351 .store = iommu_group_attr_store, 352 }; 353 354 static int iommu_group_create_file(struct iommu_group *group, 355 struct iommu_group_attribute *attr) 356 { 357 return sysfs_create_file(&group->kobj, &attr->attr); 358 } 359 360 static void iommu_group_remove_file(struct iommu_group *group, 361 struct iommu_group_attribute *attr) 362 { 363 sysfs_remove_file(&group->kobj, &attr->attr); 364 } 365 366 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) 367 { 368 return sprintf(buf, "%s\n", group->name); 369 } 370 371 /** 372 * iommu_insert_resv_region - Insert a new region in the 373 * list of reserved regions. 374 * @new: new region to insert 375 * @regions: list of regions 376 * 377 * Elements are sorted by start address and overlapping segments 378 * of the same type are merged. 
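 *
 * Illustrative example (not part of the original description): if @regions
 * already holds a "direct" region covering 0x9000-0xafff, inserting another
 * "direct" region covering 0xa000-0xbfff keeps the list sorted by start
 * address and merges the two overlapping entries into a single region
 * covering 0x9000-0xbfff; entries of a different type than @new are left
 * untouched by the merge pass.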
379 */ 380 int iommu_insert_resv_region(struct iommu_resv_region *new, 381 struct list_head *regions) 382 { 383 struct iommu_resv_region *iter, *tmp, *nr, *top; 384 LIST_HEAD(stack); 385 386 nr = iommu_alloc_resv_region(new->start, new->length, 387 new->prot, new->type); 388 if (!nr) 389 return -ENOMEM; 390 391 /* First add the new element based on start address sorting */ 392 list_for_each_entry(iter, regions, list) { 393 if (nr->start < iter->start || 394 (nr->start == iter->start && nr->type <= iter->type)) 395 break; 396 } 397 list_add_tail(&nr->list, &iter->list); 398 399 /* Merge overlapping segments of type nr->type in @regions, if any */ 400 list_for_each_entry_safe(iter, tmp, regions, list) { 401 phys_addr_t top_end, iter_end = iter->start + iter->length - 1; 402 403 /* no merge needed on elements of different types than @new */ 404 if (iter->type != new->type) { 405 list_move_tail(&iter->list, &stack); 406 continue; 407 } 408 409 /* look for the last stack element of same type as @iter */ 410 list_for_each_entry_reverse(top, &stack, list) 411 if (top->type == iter->type) 412 goto check_overlap; 413 414 list_move_tail(&iter->list, &stack); 415 continue; 416 417 check_overlap: 418 top_end = top->start + top->length - 1; 419 420 if (iter->start > top_end + 1) { 421 list_move_tail(&iter->list, &stack); 422 } else { 423 top->length = max(top_end, iter_end) - top->start + 1; 424 list_del(&iter->list); 425 kfree(iter); 426 } 427 } 428 list_splice(&stack, regions); 429 return 0; 430 } 431 432 static int 433 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, 434 struct list_head *group_resv_regions) 435 { 436 struct iommu_resv_region *entry; 437 int ret = 0; 438 439 list_for_each_entry(entry, dev_resv_regions, list) { 440 ret = iommu_insert_resv_region(entry, group_resv_regions); 441 if (ret) 442 break; 443 } 444 return ret; 445 } 446 447 int iommu_get_group_resv_regions(struct iommu_group *group, 448 struct list_head *head) 449 { 450 struct group_device *device; 451 int ret = 0; 452 453 mutex_lock(&group->mutex); 454 list_for_each_entry(device, &group->devices, list) { 455 struct list_head dev_resv_regions; 456 457 INIT_LIST_HEAD(&dev_resv_regions); 458 iommu_get_resv_regions(device->dev, &dev_resv_regions); 459 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); 460 iommu_put_resv_regions(device->dev, &dev_resv_regions); 461 if (ret) 462 break; 463 } 464 mutex_unlock(&group->mutex); 465 return ret; 466 } 467 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); 468 469 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, 470 char *buf) 471 { 472 struct iommu_resv_region *region, *next; 473 struct list_head group_resv_regions; 474 char *str = buf; 475 476 INIT_LIST_HEAD(&group_resv_regions); 477 iommu_get_group_resv_regions(group, &group_resv_regions); 478 479 list_for_each_entry_safe(region, next, &group_resv_regions, list) { 480 str += sprintf(str, "0x%016llx 0x%016llx %s\n", 481 (long long int)region->start, 482 (long long int)(region->start + 483 region->length - 1), 484 iommu_group_resv_type_string[region->type]); 485 kfree(region); 486 } 487 488 return (str - buf); 489 } 490 491 static ssize_t iommu_group_show_type(struct iommu_group *group, 492 char *buf) 493 { 494 char *type = "unknown\n"; 495 496 if (group->default_domain) { 497 switch (group->default_domain->type) { 498 case IOMMU_DOMAIN_BLOCKED: 499 type = "blocked\n"; 500 break; 501 case IOMMU_DOMAIN_IDENTITY: 502 type = "identity\n"; 503 break; 504 case IOMMU_DOMAIN_UNMANAGED: 505 
type = "unmanaged\n"; 506 break; 507 case IOMMU_DOMAIN_DMA: 508 type = "DMA\n"; 509 break; 510 } 511 } 512 strcpy(buf, type); 513 514 return strlen(type); 515 } 516 517 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); 518 519 static IOMMU_GROUP_ATTR(reserved_regions, 0444, 520 iommu_group_show_resv_regions, NULL); 521 522 static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL); 523 524 static void iommu_group_release(struct kobject *kobj) 525 { 526 struct iommu_group *group = to_iommu_group(kobj); 527 528 pr_debug("Releasing group %d\n", group->id); 529 530 if (group->iommu_data_release) 531 group->iommu_data_release(group->iommu_data); 532 533 ida_simple_remove(&iommu_group_ida, group->id); 534 535 if (group->default_domain) 536 iommu_domain_free(group->default_domain); 537 538 kfree(group->name); 539 kfree(group); 540 } 541 542 static struct kobj_type iommu_group_ktype = { 543 .sysfs_ops = &iommu_group_sysfs_ops, 544 .release = iommu_group_release, 545 }; 546 547 /** 548 * iommu_group_alloc - Allocate a new group 549 * 550 * This function is called by an iommu driver to allocate a new iommu 551 * group. The iommu group represents the minimum granularity of the iommu. 552 * Upon successful return, the caller holds a reference to the supplied 553 * group in order to hold the group until devices are added. Use 554 * iommu_group_put() to release this extra reference count, allowing the 555 * group to be automatically reclaimed once it has no devices or external 556 * references. 557 */ 558 struct iommu_group *iommu_group_alloc(void) 559 { 560 struct iommu_group *group; 561 int ret; 562 563 group = kzalloc(sizeof(*group), GFP_KERNEL); 564 if (!group) 565 return ERR_PTR(-ENOMEM); 566 567 group->kobj.kset = iommu_group_kset; 568 mutex_init(&group->mutex); 569 INIT_LIST_HEAD(&group->devices); 570 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); 571 572 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); 573 if (ret < 0) { 574 kfree(group); 575 return ERR_PTR(ret); 576 } 577 group->id = ret; 578 579 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, 580 NULL, "%d", group->id); 581 if (ret) { 582 ida_simple_remove(&iommu_group_ida, group->id); 583 kfree(group); 584 return ERR_PTR(ret); 585 } 586 587 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); 588 if (!group->devices_kobj) { 589 kobject_put(&group->kobj); /* triggers .release & free */ 590 return ERR_PTR(-ENOMEM); 591 } 592 593 /* 594 * The devices_kobj holds a reference on the group kobject, so 595 * as long as that exists so will the group. We can therefore 596 * use the devices_kobj for reference counting. 
597 */ 598 kobject_put(&group->kobj); 599 600 ret = iommu_group_create_file(group, 601 &iommu_group_attr_reserved_regions); 602 if (ret) 603 return ERR_PTR(ret); 604 605 ret = iommu_group_create_file(group, &iommu_group_attr_type); 606 if (ret) 607 return ERR_PTR(ret); 608 609 pr_debug("Allocated group %d\n", group->id); 610 611 return group; 612 } 613 EXPORT_SYMBOL_GPL(iommu_group_alloc); 614 615 struct iommu_group *iommu_group_get_by_id(int id) 616 { 617 struct kobject *group_kobj; 618 struct iommu_group *group; 619 const char *name; 620 621 if (!iommu_group_kset) 622 return NULL; 623 624 name = kasprintf(GFP_KERNEL, "%d", id); 625 if (!name) 626 return NULL; 627 628 group_kobj = kset_find_obj(iommu_group_kset, name); 629 kfree(name); 630 631 if (!group_kobj) 632 return NULL; 633 634 group = container_of(group_kobj, struct iommu_group, kobj); 635 BUG_ON(group->id != id); 636 637 kobject_get(group->devices_kobj); 638 kobject_put(&group->kobj); 639 640 return group; 641 } 642 EXPORT_SYMBOL_GPL(iommu_group_get_by_id); 643 644 /** 645 * iommu_group_get_iommudata - retrieve iommu_data registered for a group 646 * @group: the group 647 * 648 * iommu drivers can store data in the group for use when doing iommu 649 * operations. This function provides a way to retrieve it. Caller 650 * should hold a group reference. 651 */ 652 void *iommu_group_get_iommudata(struct iommu_group *group) 653 { 654 return group->iommu_data; 655 } 656 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata); 657 658 /** 659 * iommu_group_set_iommudata - set iommu_data for a group 660 * @group: the group 661 * @iommu_data: new data 662 * @release: release function for iommu_data 663 * 664 * iommu drivers can store data in the group for use when doing iommu 665 * operations. This function provides a way to set the data after 666 * the group has been allocated. Caller should hold a group reference. 667 */ 668 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 669 void (*release)(void *iommu_data)) 670 { 671 group->iommu_data = iommu_data; 672 group->iommu_data_release = release; 673 } 674 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata); 675 676 /** 677 * iommu_group_set_name - set name for a group 678 * @group: the group 679 * @name: name 680 * 681 * Allow iommu driver to set a name for a group. When set it will 682 * appear in a name attribute file under the group in sysfs. 
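 *
 * Minimal usage sketch (illustrative only, the name below is made up):
 *
 *	group = iommu_group_alloc();
 *	if (!IS_ERR(group))
 *		iommu_group_set_name(group, "my-isolation-unit");
 *
 * Passing a NULL @name removes a name that was set previously.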
683 */ 684 int iommu_group_set_name(struct iommu_group *group, const char *name) 685 { 686 int ret; 687 688 if (group->name) { 689 iommu_group_remove_file(group, &iommu_group_attr_name); 690 kfree(group->name); 691 group->name = NULL; 692 if (!name) 693 return 0; 694 } 695 696 group->name = kstrdup(name, GFP_KERNEL); 697 if (!group->name) 698 return -ENOMEM; 699 700 ret = iommu_group_create_file(group, &iommu_group_attr_name); 701 if (ret) { 702 kfree(group->name); 703 group->name = NULL; 704 return ret; 705 } 706 707 return 0; 708 } 709 EXPORT_SYMBOL_GPL(iommu_group_set_name); 710 711 static int iommu_group_create_direct_mappings(struct iommu_group *group, 712 struct device *dev) 713 { 714 struct iommu_domain *domain = group->default_domain; 715 struct iommu_resv_region *entry; 716 struct list_head mappings; 717 unsigned long pg_size; 718 int ret = 0; 719 720 if (!domain || domain->type != IOMMU_DOMAIN_DMA) 721 return 0; 722 723 BUG_ON(!domain->pgsize_bitmap); 724 725 pg_size = 1UL << __ffs(domain->pgsize_bitmap); 726 INIT_LIST_HEAD(&mappings); 727 728 iommu_get_resv_regions(dev, &mappings); 729 730 /* We need to consider overlapping regions for different devices */ 731 list_for_each_entry(entry, &mappings, list) { 732 dma_addr_t start, end, addr; 733 734 if (domain->ops->apply_resv_region) 735 domain->ops->apply_resv_region(dev, domain, entry); 736 737 start = ALIGN(entry->start, pg_size); 738 end = ALIGN(entry->start + entry->length, pg_size); 739 740 if (entry->type != IOMMU_RESV_DIRECT && 741 entry->type != IOMMU_RESV_DIRECT_RELAXABLE) 742 continue; 743 744 for (addr = start; addr < end; addr += pg_size) { 745 phys_addr_t phys_addr; 746 747 phys_addr = iommu_iova_to_phys(domain, addr); 748 if (phys_addr) 749 continue; 750 751 ret = iommu_map(domain, addr, addr, pg_size, entry->prot); 752 if (ret) 753 goto out; 754 } 755 756 } 757 758 iommu_flush_tlb_all(domain); 759 760 out: 761 iommu_put_resv_regions(dev, &mappings); 762 763 return ret; 764 } 765 766 /** 767 * iommu_group_add_device - add a device to an iommu group 768 * @group: the group into which to add the device (reference should be held) 769 * @dev: the device 770 * 771 * This function is called by an iommu driver to add a device into a 772 * group. Adding a device increments the group reference count. 773 */ 774 int iommu_group_add_device(struct iommu_group *group, struct device *dev) 775 { 776 int ret, i = 0; 777 struct group_device *device; 778 779 device = kzalloc(sizeof(*device), GFP_KERNEL); 780 if (!device) 781 return -ENOMEM; 782 783 device->dev = dev; 784 785 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); 786 if (ret) 787 goto err_free_device; 788 789 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); 790 rename: 791 if (!device->name) { 792 ret = -ENOMEM; 793 goto err_remove_link; 794 } 795 796 ret = sysfs_create_link_nowarn(group->devices_kobj, 797 &dev->kobj, device->name); 798 if (ret) { 799 if (ret == -EEXIST && i >= 0) { 800 /* 801 * Account for the slim chance of collision 802 * and append an instance to the name. 
803 */ 804 kfree(device->name); 805 device->name = kasprintf(GFP_KERNEL, "%s.%d", 806 kobject_name(&dev->kobj), i++); 807 goto rename; 808 } 809 goto err_free_name; 810 } 811 812 kobject_get(group->devices_kobj); 813 814 dev->iommu_group = group; 815 816 iommu_group_create_direct_mappings(group, dev); 817 818 mutex_lock(&group->mutex); 819 list_add_tail(&device->list, &group->devices); 820 if (group->domain) 821 ret = __iommu_attach_device(group->domain, dev); 822 mutex_unlock(&group->mutex); 823 if (ret) 824 goto err_put_group; 825 826 /* Notify any listeners about change to group. */ 827 blocking_notifier_call_chain(&group->notifier, 828 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev); 829 830 trace_add_device_to_group(group->id, dev); 831 832 dev_info(dev, "Adding to iommu group %d\n", group->id); 833 834 return 0; 835 836 err_put_group: 837 mutex_lock(&group->mutex); 838 list_del(&device->list); 839 mutex_unlock(&group->mutex); 840 dev->iommu_group = NULL; 841 kobject_put(group->devices_kobj); 842 sysfs_remove_link(group->devices_kobj, device->name); 843 err_free_name: 844 kfree(device->name); 845 err_remove_link: 846 sysfs_remove_link(&dev->kobj, "iommu_group"); 847 err_free_device: 848 kfree(device); 849 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); 850 return ret; 851 } 852 EXPORT_SYMBOL_GPL(iommu_group_add_device); 853 854 /** 855 * iommu_group_remove_device - remove a device from it's current group 856 * @dev: device to be removed 857 * 858 * This function is called by an iommu driver to remove the device from 859 * it's current group. This decrements the iommu group reference count. 860 */ 861 void iommu_group_remove_device(struct device *dev) 862 { 863 struct iommu_group *group = dev->iommu_group; 864 struct group_device *tmp_device, *device = NULL; 865 866 dev_info(dev, "Removing from iommu group %d\n", group->id); 867 868 /* Pre-notify listeners that a device is being removed. */ 869 blocking_notifier_call_chain(&group->notifier, 870 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev); 871 872 mutex_lock(&group->mutex); 873 list_for_each_entry(tmp_device, &group->devices, list) { 874 if (tmp_device->dev == dev) { 875 device = tmp_device; 876 list_del(&device->list); 877 break; 878 } 879 } 880 mutex_unlock(&group->mutex); 881 882 if (!device) 883 return; 884 885 sysfs_remove_link(group->devices_kobj, device->name); 886 sysfs_remove_link(&dev->kobj, "iommu_group"); 887 888 trace_remove_device_from_group(group->id, dev); 889 890 kfree(device->name); 891 kfree(device); 892 dev->iommu_group = NULL; 893 kobject_put(group->devices_kobj); 894 } 895 EXPORT_SYMBOL_GPL(iommu_group_remove_device); 896 897 static int iommu_group_device_count(struct iommu_group *group) 898 { 899 struct group_device *entry; 900 int ret = 0; 901 902 list_for_each_entry(entry, &group->devices, list) 903 ret++; 904 905 return ret; 906 } 907 908 /** 909 * iommu_group_for_each_dev - iterate over each device in the group 910 * @group: the group 911 * @data: caller opaque data to be passed to callback function 912 * @fn: caller supplied callback function 913 * 914 * This function is called by group users to iterate over group devices. 915 * Callers should hold a reference count to the group during callback. 916 * The group->mutex is held across callbacks, which will block calls to 917 * iommu_group_add/remove_device. 
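 *
 * A hypothetical caller (illustrative sketch, not taken from in-tree code)
 * could count the devices in the group that are currently bound to a driver:
 *
 *	static int count_bound(struct device *dev, void *data)
 *	{
 *		int *nbound = data;
 *
 *		if (dev->driver)
 *			(*nbound)++;
 *		return 0;
 *	}
 *
 *	int nbound = 0;
 *	iommu_group_for_each_dev(group, &nbound, count_bound);
 *
 * A non-zero return value from the callback stops the iteration and is
 * propagated back to the caller.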
918 */ 919 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, 920 int (*fn)(struct device *, void *)) 921 { 922 struct group_device *device; 923 int ret = 0; 924 925 list_for_each_entry(device, &group->devices, list) { 926 ret = fn(device->dev, data); 927 if (ret) 928 break; 929 } 930 return ret; 931 } 932 933 934 int iommu_group_for_each_dev(struct iommu_group *group, void *data, 935 int (*fn)(struct device *, void *)) 936 { 937 int ret; 938 939 mutex_lock(&group->mutex); 940 ret = __iommu_group_for_each_dev(group, data, fn); 941 mutex_unlock(&group->mutex); 942 943 return ret; 944 } 945 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev); 946 947 /** 948 * iommu_group_get - Return the group for a device and increment reference 949 * @dev: get the group that this device belongs to 950 * 951 * This function is called by iommu drivers and users to get the group 952 * for the specified device. If found, the group is returned and the group 953 * reference in incremented, else NULL. 954 */ 955 struct iommu_group *iommu_group_get(struct device *dev) 956 { 957 struct iommu_group *group = dev->iommu_group; 958 959 if (group) 960 kobject_get(group->devices_kobj); 961 962 return group; 963 } 964 EXPORT_SYMBOL_GPL(iommu_group_get); 965 966 /** 967 * iommu_group_ref_get - Increment reference on a group 968 * @group: the group to use, must not be NULL 969 * 970 * This function is called by iommu drivers to take additional references on an 971 * existing group. Returns the given group for convenience. 972 */ 973 struct iommu_group *iommu_group_ref_get(struct iommu_group *group) 974 { 975 kobject_get(group->devices_kobj); 976 return group; 977 } 978 EXPORT_SYMBOL_GPL(iommu_group_ref_get); 979 980 /** 981 * iommu_group_put - Decrement group reference 982 * @group: the group to use 983 * 984 * This function is called by iommu drivers and users to release the 985 * iommu group. Once the reference count is zero, the group is released. 986 */ 987 void iommu_group_put(struct iommu_group *group) 988 { 989 if (group) 990 kobject_put(group->devices_kobj); 991 } 992 EXPORT_SYMBOL_GPL(iommu_group_put); 993 994 /** 995 * iommu_group_register_notifier - Register a notifier for group changes 996 * @group: the group to watch 997 * @nb: notifier block to signal 998 * 999 * This function allows iommu group users to track changes in a group. 1000 * See include/linux/iommu.h for actions sent via this notifier. Caller 1001 * should hold a reference to the group throughout notifier registration. 1002 */ 1003 int iommu_group_register_notifier(struct iommu_group *group, 1004 struct notifier_block *nb) 1005 { 1006 return blocking_notifier_chain_register(&group->notifier, nb); 1007 } 1008 EXPORT_SYMBOL_GPL(iommu_group_register_notifier); 1009 1010 /** 1011 * iommu_group_unregister_notifier - Unregister a notifier 1012 * @group: the group to watch 1013 * @nb: notifier block to signal 1014 * 1015 * Unregister a previously registered group notifier block. 
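 *
 * Illustrative sketch (hypothetical consumer code): a block registered as
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_BIND_DRIVER)
 *			dev_info(dev, "about to bind a driver\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *
 * is torn down again with iommu_group_unregister_notifier(group, &my_nb).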
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
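 *
 * For reference, a purely illustrative sketch of the consumer side of this
 * contract (the handler below is hypothetical): a driver that registered it
 * with iommu_register_device_fault_handler() completes recoverable page
 * requests via iommu_page_response(), e.g.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp = {
 *			.version = IOMMU_PAGE_RESP_VERSION_1,
 *			.pasid	 = fault->prm.pasid,
 *			.grpid	 = fault->prm.grpid,
 *			.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
 *			.code	 = IOMMU_PAGE_RESP_INVALID,
 *		};
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *		return iommu_page_response(dev, &resp);
 *	}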
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool pasid_valid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

		if ((pasid_valid && prm->pasid != msg->pasid) ||
		    prm->grpid != msg->grpid)
			continue;

		/* Sanitize the reply */
		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.
This effectively means that devices cannot spoof their 1243 * requester ID, requests and completions cannot be redirected, and all 1244 * transactions are forwarded upstream, even as it passes through a 1245 * bridge where the target device is downstream. 1246 */ 1247 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF) 1248 1249 /* 1250 * For multifunction devices which are not isolated from each other, find 1251 * all the other non-isolated functions and look for existing groups. For 1252 * each function, we also need to look for aliases to or from other devices 1253 * that may already have a group. 1254 */ 1255 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, 1256 unsigned long *devfns) 1257 { 1258 struct pci_dev *tmp = NULL; 1259 struct iommu_group *group; 1260 1261 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) 1262 return NULL; 1263 1264 for_each_pci_dev(tmp) { 1265 if (tmp == pdev || tmp->bus != pdev->bus || 1266 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) || 1267 pci_acs_enabled(tmp, REQ_ACS_FLAGS)) 1268 continue; 1269 1270 group = get_pci_alias_group(tmp, devfns); 1271 if (group) { 1272 pci_dev_put(tmp); 1273 return group; 1274 } 1275 } 1276 1277 return NULL; 1278 } 1279 1280 /* 1281 * Look for aliases to or from the given device for existing groups. DMA 1282 * aliases are only supported on the same bus, therefore the search 1283 * space is quite small (especially since we're really only looking at pcie 1284 * device, and therefore only expect multiple slots on the root complex or 1285 * downstream switch ports). It's conceivable though that a pair of 1286 * multifunction devices could have aliases between them that would cause a 1287 * loop. To prevent this, we use a bitmap to track where we've been. 1288 */ 1289 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, 1290 unsigned long *devfns) 1291 { 1292 struct pci_dev *tmp = NULL; 1293 struct iommu_group *group; 1294 1295 if (test_and_set_bit(pdev->devfn & 0xff, devfns)) 1296 return NULL; 1297 1298 group = iommu_group_get(&pdev->dev); 1299 if (group) 1300 return group; 1301 1302 for_each_pci_dev(tmp) { 1303 if (tmp == pdev || tmp->bus != pdev->bus) 1304 continue; 1305 1306 /* We alias them or they alias us */ 1307 if (pci_devs_are_dma_aliases(pdev, tmp)) { 1308 group = get_pci_alias_group(tmp, devfns); 1309 if (group) { 1310 pci_dev_put(tmp); 1311 return group; 1312 } 1313 1314 group = get_pci_function_alias_group(tmp, devfns); 1315 if (group) { 1316 pci_dev_put(tmp); 1317 return group; 1318 } 1319 } 1320 } 1321 1322 return NULL; 1323 } 1324 1325 struct group_for_pci_data { 1326 struct pci_dev *pdev; 1327 struct iommu_group *group; 1328 }; 1329 1330 /* 1331 * DMA alias iterator callback, return the last seen device. Stop and return 1332 * the IOMMU group if we find one along the way. 1333 */ 1334 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) 1335 { 1336 struct group_for_pci_data *data = opaque; 1337 1338 data->pdev = pdev; 1339 data->group = iommu_group_get(&pdev->dev); 1340 1341 return data->group != NULL; 1342 } 1343 1344 /* 1345 * Generic device_group call-back function. It just allocates one 1346 * iommu-group per device. 
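 *
 * An IOMMU driver would typically just plug this into its ops (illustrative
 * sketch, other fields omitted):
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.device_group = generic_device_group,
 *	};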
1347 */ 1348 struct iommu_group *generic_device_group(struct device *dev) 1349 { 1350 return iommu_group_alloc(); 1351 } 1352 EXPORT_SYMBOL_GPL(generic_device_group); 1353 1354 /* 1355 * Use standard PCI bus topology, isolation features, and DMA alias quirks 1356 * to find or create an IOMMU group for a device. 1357 */ 1358 struct iommu_group *pci_device_group(struct device *dev) 1359 { 1360 struct pci_dev *pdev = to_pci_dev(dev); 1361 struct group_for_pci_data data; 1362 struct pci_bus *bus; 1363 struct iommu_group *group = NULL; 1364 u64 devfns[4] = { 0 }; 1365 1366 if (WARN_ON(!dev_is_pci(dev))) 1367 return ERR_PTR(-EINVAL); 1368 1369 /* 1370 * Find the upstream DMA alias for the device. A device must not 1371 * be aliased due to topology in order to have its own IOMMU group. 1372 * If we find an alias along the way that already belongs to a 1373 * group, use it. 1374 */ 1375 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data)) 1376 return data.group; 1377 1378 pdev = data.pdev; 1379 1380 /* 1381 * Continue upstream from the point of minimum IOMMU granularity 1382 * due to aliases to the point where devices are protected from 1383 * peer-to-peer DMA by PCI ACS. Again, if we find an existing 1384 * group, use it. 1385 */ 1386 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { 1387 if (!bus->self) 1388 continue; 1389 1390 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) 1391 break; 1392 1393 pdev = bus->self; 1394 1395 group = iommu_group_get(&pdev->dev); 1396 if (group) 1397 return group; 1398 } 1399 1400 /* 1401 * Look for existing groups on device aliases. If we alias another 1402 * device or another device aliases us, use the same group. 1403 */ 1404 group = get_pci_alias_group(pdev, (unsigned long *)devfns); 1405 if (group) 1406 return group; 1407 1408 /* 1409 * Look for existing groups on non-isolated functions on the same 1410 * slot and aliases of those funcions, if any. No need to clear 1411 * the search bitmap, the tested devfns are still valid. 1412 */ 1413 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); 1414 if (group) 1415 return group; 1416 1417 /* No shared group found, allocate new */ 1418 return iommu_group_alloc(); 1419 } 1420 EXPORT_SYMBOL_GPL(pci_device_group); 1421 1422 /* Get the IOMMU group for device on fsl-mc bus */ 1423 struct iommu_group *fsl_mc_device_group(struct device *dev) 1424 { 1425 struct device *cont_dev = fsl_mc_cont_dev(dev); 1426 struct iommu_group *group; 1427 1428 group = iommu_group_get(cont_dev); 1429 if (!group) 1430 group = iommu_group_alloc(); 1431 return group; 1432 } 1433 EXPORT_SYMBOL_GPL(fsl_mc_device_group); 1434 1435 static int iommu_get_def_domain_type(struct device *dev) 1436 { 1437 const struct iommu_ops *ops = dev->bus->iommu_ops; 1438 unsigned int type = 0; 1439 1440 if (ops->def_domain_type) 1441 type = ops->def_domain_type(dev); 1442 1443 return (type == 0) ? 
iommu_def_domain_type : type; 1444 } 1445 1446 static int iommu_group_alloc_default_domain(struct bus_type *bus, 1447 struct iommu_group *group, 1448 unsigned int type) 1449 { 1450 struct iommu_domain *dom; 1451 1452 dom = __iommu_domain_alloc(bus, type); 1453 if (!dom && type != IOMMU_DOMAIN_DMA) { 1454 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA); 1455 if (dom) 1456 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1457 type, group->name); 1458 } 1459 1460 if (!dom) 1461 return -ENOMEM; 1462 1463 group->default_domain = dom; 1464 if (!group->domain) 1465 group->domain = dom; 1466 1467 if (!iommu_dma_strict) { 1468 int attr = 1; 1469 iommu_domain_set_attr(dom, 1470 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, 1471 &attr); 1472 } 1473 1474 return 0; 1475 } 1476 1477 static int iommu_alloc_default_domain(struct device *dev) 1478 { 1479 struct iommu_group *group; 1480 unsigned int type; 1481 1482 group = iommu_group_get(dev); 1483 if (!group) 1484 return -ENODEV; 1485 1486 if (group->default_domain) 1487 return 0; 1488 1489 type = iommu_get_def_domain_type(dev); 1490 1491 return iommu_group_alloc_default_domain(dev->bus, group, type); 1492 } 1493 1494 /** 1495 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 1496 * @dev: target device 1497 * 1498 * This function is intended to be called by IOMMU drivers and extended to 1499 * support common, bus-defined algorithms when determining or creating the 1500 * IOMMU group for a device. On success, the caller will hold a reference 1501 * to the returned IOMMU group, which will already include the provided 1502 * device. The reference should be released with iommu_group_put(). 1503 */ 1504 struct iommu_group *iommu_group_get_for_dev(struct device *dev) 1505 { 1506 const struct iommu_ops *ops = dev->bus->iommu_ops; 1507 struct iommu_group *group; 1508 int ret; 1509 1510 group = iommu_group_get(dev); 1511 if (group) 1512 return group; 1513 1514 if (!ops) 1515 return ERR_PTR(-EINVAL); 1516 1517 group = ops->device_group(dev); 1518 if (WARN_ON_ONCE(group == NULL)) 1519 return ERR_PTR(-EINVAL); 1520 1521 if (IS_ERR(group)) 1522 return group; 1523 1524 ret = iommu_group_add_device(group, dev); 1525 if (ret) 1526 goto out_put_group; 1527 1528 /* 1529 * Try to allocate a default domain - needs support from the 1530 * IOMMU driver. There are still some drivers which don't support 1531 * default domains, so the return value is not yet checked. Only 1532 * allocate the domain here when the driver still has the 1533 * add_device/remove_device call-backs implemented. 1534 */ 1535 if (!ops->probe_device) { 1536 iommu_alloc_default_domain(dev); 1537 1538 if (group->default_domain) 1539 ret = __iommu_attach_device(group->default_domain, dev); 1540 1541 if (ret) 1542 goto out_put_group; 1543 } 1544 1545 return group; 1546 1547 out_put_group: 1548 iommu_group_put(group); 1549 1550 return ERR_PTR(ret); 1551 } 1552 EXPORT_SYMBOL(iommu_group_get_for_dev); 1553 1554 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1555 { 1556 return group->default_domain; 1557 } 1558 1559 static int add_iommu_group(struct device *dev, void *data) 1560 { 1561 int ret = iommu_probe_device(dev); 1562 1563 /* 1564 * We ignore -ENODEV errors for now, as they just mean that the 1565 * device is not translated by an IOMMU. We still care about 1566 * other errors and fail to initialize when they happen. 
1567 */ 1568 if (ret == -ENODEV) 1569 ret = 0; 1570 1571 return ret; 1572 } 1573 1574 static int remove_iommu_group(struct device *dev, void *data) 1575 { 1576 iommu_release_device(dev); 1577 1578 return 0; 1579 } 1580 1581 static int iommu_bus_notifier(struct notifier_block *nb, 1582 unsigned long action, void *data) 1583 { 1584 unsigned long group_action = 0; 1585 struct device *dev = data; 1586 struct iommu_group *group; 1587 1588 /* 1589 * ADD/DEL call into iommu driver ops if provided, which may 1590 * result in ADD/DEL notifiers to group->notifier 1591 */ 1592 if (action == BUS_NOTIFY_ADD_DEVICE) { 1593 int ret; 1594 1595 ret = iommu_probe_device(dev); 1596 return (ret) ? NOTIFY_DONE : NOTIFY_OK; 1597 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { 1598 iommu_release_device(dev); 1599 return NOTIFY_OK; 1600 } 1601 1602 /* 1603 * Remaining BUS_NOTIFYs get filtered and republished to the 1604 * group, if anyone is listening 1605 */ 1606 group = iommu_group_get(dev); 1607 if (!group) 1608 return 0; 1609 1610 switch (action) { 1611 case BUS_NOTIFY_BIND_DRIVER: 1612 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER; 1613 break; 1614 case BUS_NOTIFY_BOUND_DRIVER: 1615 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER; 1616 break; 1617 case BUS_NOTIFY_UNBIND_DRIVER: 1618 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER; 1619 break; 1620 case BUS_NOTIFY_UNBOUND_DRIVER: 1621 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER; 1622 break; 1623 } 1624 1625 if (group_action) 1626 blocking_notifier_call_chain(&group->notifier, 1627 group_action, dev); 1628 1629 iommu_group_put(group); 1630 return 0; 1631 } 1632 1633 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) 1634 { 1635 int err; 1636 struct notifier_block *nb; 1637 1638 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 1639 if (!nb) 1640 return -ENOMEM; 1641 1642 nb->notifier_call = iommu_bus_notifier; 1643 1644 err = bus_register_notifier(bus, nb); 1645 if (err) 1646 goto out_free; 1647 1648 err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group); 1649 if (err) 1650 goto out_err; 1651 1652 1653 return 0; 1654 1655 out_err: 1656 /* Clean up */ 1657 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group); 1658 bus_unregister_notifier(bus, nb); 1659 1660 out_free: 1661 kfree(nb); 1662 1663 return err; 1664 } 1665 1666 /** 1667 * bus_set_iommu - set iommu-callbacks for the bus 1668 * @bus: bus. 1669 * @ops: the callbacks provided by the iommu-driver 1670 * 1671 * This function is called by an iommu driver to set the iommu methods 1672 * used for a particular bus. Drivers for devices on that bus can use 1673 * the iommu-api after these ops are registered. 1674 * This special function is needed because IOMMUs are usually devices on 1675 * the bus itself, so the iommu drivers are not initialized when the bus 1676 * is set up. With this function the iommu-driver can set the iommu-ops 1677 * afterwards. 
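 *
 * Typical use from an IOMMU driver's probe/init path (illustrative sketch,
 * "my_iommu_ops" is a made-up driver-defined ops structure):
 *
 *	if (!iommu_present(&platform_bus_type))
 *		bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 *
 * After this, devices on that bus can be managed through the generic
 * IOMMU API.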
1678 */ 1679 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) 1680 { 1681 int err; 1682 1683 if (ops == NULL) { 1684 bus->iommu_ops = NULL; 1685 return 0; 1686 } 1687 1688 if (bus->iommu_ops != NULL) 1689 return -EBUSY; 1690 1691 bus->iommu_ops = ops; 1692 1693 /* Do IOMMU specific setup for this bus-type */ 1694 err = iommu_bus_init(bus, ops); 1695 if (err) 1696 bus->iommu_ops = NULL; 1697 1698 return err; 1699 } 1700 EXPORT_SYMBOL_GPL(bus_set_iommu); 1701 1702 bool iommu_present(struct bus_type *bus) 1703 { 1704 return bus->iommu_ops != NULL; 1705 } 1706 EXPORT_SYMBOL_GPL(iommu_present); 1707 1708 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 1709 { 1710 if (!bus->iommu_ops || !bus->iommu_ops->capable) 1711 return false; 1712 1713 return bus->iommu_ops->capable(cap); 1714 } 1715 EXPORT_SYMBOL_GPL(iommu_capable); 1716 1717 /** 1718 * iommu_set_fault_handler() - set a fault handler for an iommu domain 1719 * @domain: iommu domain 1720 * @handler: fault handler 1721 * @token: user data, will be passed back to the fault handler 1722 * 1723 * This function should be used by IOMMU users which want to be notified 1724 * whenever an IOMMU fault happens. 1725 * 1726 * The fault handler itself should return 0 on success, and an appropriate 1727 * error code otherwise. 1728 */ 1729 void iommu_set_fault_handler(struct iommu_domain *domain, 1730 iommu_fault_handler_t handler, 1731 void *token) 1732 { 1733 BUG_ON(!domain); 1734 1735 domain->handler = handler; 1736 domain->handler_token = token; 1737 } 1738 EXPORT_SYMBOL_GPL(iommu_set_fault_handler); 1739 1740 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, 1741 unsigned type) 1742 { 1743 struct iommu_domain *domain; 1744 1745 if (bus == NULL || bus->iommu_ops == NULL) 1746 return NULL; 1747 1748 domain = bus->iommu_ops->domain_alloc(type); 1749 if (!domain) 1750 return NULL; 1751 1752 domain->ops = bus->iommu_ops; 1753 domain->type = type; 1754 /* Assume all sizes by default; the driver may override this later */ 1755 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; 1756 1757 return domain; 1758 } 1759 1760 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 1761 { 1762 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED); 1763 } 1764 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 1765 1766 void iommu_domain_free(struct iommu_domain *domain) 1767 { 1768 domain->ops->domain_free(domain); 1769 } 1770 EXPORT_SYMBOL_GPL(iommu_domain_free); 1771 1772 static int __iommu_attach_device(struct iommu_domain *domain, 1773 struct device *dev) 1774 { 1775 int ret; 1776 if ((domain->ops->is_attach_deferred != NULL) && 1777 domain->ops->is_attach_deferred(domain, dev)) 1778 return 0; 1779 1780 if (unlikely(domain->ops->attach_dev == NULL)) 1781 return -ENODEV; 1782 1783 ret = domain->ops->attach_dev(domain, dev); 1784 if (!ret) 1785 trace_attach_device_to_domain(dev); 1786 return ret; 1787 } 1788 1789 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) 1790 { 1791 struct iommu_group *group; 1792 int ret; 1793 1794 group = iommu_group_get(dev); 1795 if (!group) 1796 return -ENODEV; 1797 1798 /* 1799 * Lock the group to make sure the device-count doesn't 1800 * change while we are attaching 1801 */ 1802 mutex_lock(&group->mutex); 1803 ret = -EINVAL; 1804 if (iommu_group_device_count(group) != 1) 1805 goto out_unlock; 1806 1807 ret = __iommu_attach_group(domain, group); 1808 1809 out_unlock: 1810 mutex_unlock(&group->mutex); 1811 iommu_group_put(group); 1812 1813 return ret; 1814 
} 1815 EXPORT_SYMBOL_GPL(iommu_attach_device); 1816 1817 int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev, 1818 struct iommu_cache_invalidate_info *inv_info) 1819 { 1820 if (unlikely(!domain->ops->cache_invalidate)) 1821 return -ENODEV; 1822 1823 return domain->ops->cache_invalidate(domain, dev, inv_info); 1824 } 1825 EXPORT_SYMBOL_GPL(iommu_cache_invalidate); 1826 1827 int iommu_sva_bind_gpasid(struct iommu_domain *domain, 1828 struct device *dev, struct iommu_gpasid_bind_data *data) 1829 { 1830 if (unlikely(!domain->ops->sva_bind_gpasid)) 1831 return -ENODEV; 1832 1833 return domain->ops->sva_bind_gpasid(domain, dev, data); 1834 } 1835 EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid); 1836 1837 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, 1838 ioasid_t pasid) 1839 { 1840 if (unlikely(!domain->ops->sva_unbind_gpasid)) 1841 return -ENODEV; 1842 1843 return domain->ops->sva_unbind_gpasid(dev, pasid); 1844 } 1845 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); 1846 1847 static void __iommu_detach_device(struct iommu_domain *domain, 1848 struct device *dev) 1849 { 1850 if ((domain->ops->is_attach_deferred != NULL) && 1851 domain->ops->is_attach_deferred(domain, dev)) 1852 return; 1853 1854 if (unlikely(domain->ops->detach_dev == NULL)) 1855 return; 1856 1857 domain->ops->detach_dev(domain, dev); 1858 trace_detach_device_from_domain(dev); 1859 } 1860 1861 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) 1862 { 1863 struct iommu_group *group; 1864 1865 group = iommu_group_get(dev); 1866 if (!group) 1867 return; 1868 1869 mutex_lock(&group->mutex); 1870 if (iommu_group_device_count(group) != 1) { 1871 WARN_ON(1); 1872 goto out_unlock; 1873 } 1874 1875 __iommu_detach_group(domain, group); 1876 1877 out_unlock: 1878 mutex_unlock(&group->mutex); 1879 iommu_group_put(group); 1880 } 1881 EXPORT_SYMBOL_GPL(iommu_detach_device); 1882 1883 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 1884 { 1885 struct iommu_domain *domain; 1886 struct iommu_group *group; 1887 1888 group = iommu_group_get(dev); 1889 if (!group) 1890 return NULL; 1891 1892 domain = group->domain; 1893 1894 iommu_group_put(group); 1895 1896 return domain; 1897 } 1898 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev); 1899 1900 /* 1901 * For IOMMU_DOMAIN_DMA implementations which already provide their own 1902 * guarantees that the group and its default domain are valid and correct. 1903 */ 1904 struct iommu_domain *iommu_get_dma_domain(struct device *dev) 1905 { 1906 return dev->iommu_group->default_domain; 1907 } 1908 1909 /* 1910 * IOMMU groups are really the natural working unit of the IOMMU, but 1911 * the IOMMU API works on domains and devices. Bridge that gap by 1912 * iterating over the devices in a group. Ideally we'd have a single 1913 * device which represents the requestor ID of the group, but we also 1914 * allow IOMMU drivers to create policy defined minimum sets, where 1915 * the physical hardware may be able to distiguish members, but we 1916 * wish to group them at a higher level (ex. untrusted multi-function 1917 * PCI devices). Thus we attach each device. 
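 *
 * For reference, a typical consumer flow that ends up here (illustrative
 * sketch only, e.g. a VFIO-style user of the unmanaged-domain API):
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_group(domain, group);
 *	...
 *	iommu_detach_group(domain, group);
 *	iommu_domain_free(domain);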
1918 */ 1919 static int iommu_group_do_attach_device(struct device *dev, void *data) 1920 { 1921 struct iommu_domain *domain = data; 1922 1923 return __iommu_attach_device(domain, dev); 1924 } 1925 1926 static int __iommu_attach_group(struct iommu_domain *domain, 1927 struct iommu_group *group) 1928 { 1929 int ret; 1930 1931 if (group->default_domain && group->domain != group->default_domain) 1932 return -EBUSY; 1933 1934 ret = __iommu_group_for_each_dev(group, domain, 1935 iommu_group_do_attach_device); 1936 if (ret == 0) 1937 group->domain = domain; 1938 1939 return ret; 1940 } 1941 1942 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 1943 { 1944 int ret; 1945 1946 mutex_lock(&group->mutex); 1947 ret = __iommu_attach_group(domain, group); 1948 mutex_unlock(&group->mutex); 1949 1950 return ret; 1951 } 1952 EXPORT_SYMBOL_GPL(iommu_attach_group); 1953 1954 static int iommu_group_do_detach_device(struct device *dev, void *data) 1955 { 1956 struct iommu_domain *domain = data; 1957 1958 __iommu_detach_device(domain, dev); 1959 1960 return 0; 1961 } 1962 1963 static void __iommu_detach_group(struct iommu_domain *domain, 1964 struct iommu_group *group) 1965 { 1966 int ret; 1967 1968 if (!group->default_domain) { 1969 __iommu_group_for_each_dev(group, domain, 1970 iommu_group_do_detach_device); 1971 group->domain = NULL; 1972 return; 1973 } 1974 1975 if (group->domain == group->default_domain) 1976 return; 1977 1978 /* Detach by re-attaching to the default domain */ 1979 ret = __iommu_group_for_each_dev(group, group->default_domain, 1980 iommu_group_do_attach_device); 1981 if (ret != 0) 1982 WARN_ON(1); 1983 else 1984 group->domain = group->default_domain; 1985 } 1986 1987 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 1988 { 1989 mutex_lock(&group->mutex); 1990 __iommu_detach_group(domain, group); 1991 mutex_unlock(&group->mutex); 1992 } 1993 EXPORT_SYMBOL_GPL(iommu_detach_group); 1994 1995 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 1996 { 1997 if (unlikely(domain->ops->iova_to_phys == NULL)) 1998 return 0; 1999 2000 return domain->ops->iova_to_phys(domain, iova); 2001 } 2002 EXPORT_SYMBOL_GPL(iommu_iova_to_phys); 2003 2004 static size_t iommu_pgsize(struct iommu_domain *domain, 2005 unsigned long addr_merge, size_t size) 2006 { 2007 unsigned int pgsize_idx; 2008 size_t pgsize; 2009 2010 /* Max page size that still fits into 'size' */ 2011 pgsize_idx = __fls(size); 2012 2013 /* need to consider alignment requirements ? 
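 *
 * Worked example (illustrative): with a pgsize_bitmap allowing 4K, 2M and
 * 1G pages, addr_merge == 0x201000 and size == SZ_2M, __fls(size) would
 * permit a 2M page, but __ffs(addr_merge) limits the usable alignment to
 * 4K, so a 4K page is chosen for this iteration.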
*/ 2014 if (likely(addr_merge)) { 2015 /* Max page size allowed by address */ 2016 unsigned int align_pgsize_idx = __ffs(addr_merge); 2017 pgsize_idx = min(pgsize_idx, align_pgsize_idx); 2018 } 2019 2020 /* build a mask of acceptable page sizes */ 2021 pgsize = (1UL << (pgsize_idx + 1)) - 1; 2022 2023 /* throw away page sizes not supported by the hardware */ 2024 pgsize &= domain->pgsize_bitmap; 2025 2026 /* make sure we're still sane */ 2027 BUG_ON(!pgsize); 2028 2029 /* pick the biggest page */ 2030 pgsize_idx = __fls(pgsize); 2031 pgsize = 1UL << pgsize_idx; 2032 2033 return pgsize; 2034 } 2035 2036 int __iommu_map(struct iommu_domain *domain, unsigned long iova, 2037 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 2038 { 2039 const struct iommu_ops *ops = domain->ops; 2040 unsigned long orig_iova = iova; 2041 unsigned int min_pagesz; 2042 size_t orig_size = size; 2043 phys_addr_t orig_paddr = paddr; 2044 int ret = 0; 2045 2046 if (unlikely(ops->map == NULL || 2047 domain->pgsize_bitmap == 0UL)) 2048 return -ENODEV; 2049 2050 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2051 return -EINVAL; 2052 2053 /* find out the minimum page size supported */ 2054 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2055 2056 /* 2057 * both the virtual address and the physical one, as well as 2058 * the size of the mapping, must be aligned (at least) to the 2059 * size of the smallest page supported by the hardware 2060 */ 2061 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { 2062 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", 2063 iova, &paddr, size, min_pagesz); 2064 return -EINVAL; 2065 } 2066 2067 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); 2068 2069 while (size) { 2070 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); 2071 2072 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", 2073 iova, &paddr, pgsize); 2074 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); 2075 2076 if (ret) 2077 break; 2078 2079 iova += pgsize; 2080 paddr += pgsize; 2081 size -= pgsize; 2082 } 2083 2084 if (ops->iotlb_sync_map) 2085 ops->iotlb_sync_map(domain); 2086 2087 /* unroll mapping in case something went wrong */ 2088 if (ret) 2089 iommu_unmap(domain, orig_iova, orig_size - size); 2090 else 2091 trace_map(orig_iova, orig_paddr, orig_size); 2092 2093 return ret; 2094 } 2095 2096 int iommu_map(struct iommu_domain *domain, unsigned long iova, 2097 phys_addr_t paddr, size_t size, int prot) 2098 { 2099 might_sleep(); 2100 return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL); 2101 } 2102 EXPORT_SYMBOL_GPL(iommu_map); 2103 2104 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova, 2105 phys_addr_t paddr, size_t size, int prot) 2106 { 2107 return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC); 2108 } 2109 EXPORT_SYMBOL_GPL(iommu_map_atomic); 2110 2111 static size_t __iommu_unmap(struct iommu_domain *domain, 2112 unsigned long iova, size_t size, 2113 struct iommu_iotlb_gather *iotlb_gather) 2114 { 2115 const struct iommu_ops *ops = domain->ops; 2116 size_t unmapped_page, unmapped = 0; 2117 unsigned long orig_iova = iova; 2118 unsigned int min_pagesz; 2119 2120 if (unlikely(ops->unmap == NULL || 2121 domain->pgsize_bitmap == 0UL)) 2122 return 0; 2123 2124 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) 2125 return 0; 2126 2127 /* find out the minimum page size supported */ 2128 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); 2129 2130 /* 2131 * The virtual address, as well as the size of the mapping, must be 
static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_tlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents,
			     int prot, gfp_t gfp)
{
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
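
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * tearing down several mappings with iommu_unmap_fast() and a single TLB
 * flush at the end, rather than the implicit per-call flush done by
 * iommu_unmap(). The iova/size arrays are assumptions for the example.
 */
static size_t __maybe_unused example_batched_unmap(struct iommu_domain *domain,
						   const unsigned long *iovas,
						   const size_t *sizes,
						   unsigned int count)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped = 0;
	unsigned int i;

	iommu_iotlb_gather_init(&gather);

	/* Queue up the unmaps; the driver batches the invalidations */
	for (i = 0; i < count; i++)
		unmapped += iommu_unmap_fast(domain, iovas[i], sizes[i],
					     &gather);

	/* One sync flushes everything gathered above */
	iommu_tlb_sync(domain, &gather);

	return unmapped;
}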
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
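
/*
 * Illustrative sketch (hypothetical helpers, not part of the upstream file):
 * a domain owner that wants to observe the faults delivered through
 * report_iommu_fault() installs a handler with iommu_set_fault_handler().
 * Returning -ENOSYS from the handler keeps the IOMMU driver's default fault
 * behaviour.
 */
static int __maybe_unused example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova,
						int flags, void *token)
{
	dev_warn_ratelimited(dev, "IOMMU %s fault at iova 0x%lx\n",
			     (flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			     iova);

	/* Only logged the event; let the driver apply its default policy */
	return -ENOSYS;
}

static void __maybe_unused example_install_fault_handler(struct iommu_domain *domain)
{
	/* 'token' is handed back as the handler's last argument; unused here */
	iommu_set_fault_handler(domain, example_fault_handler, NULL);
}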
static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
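
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * walking the reserved regions of a device, e.g. to punch holes in an IOVA
 * allocator before handing a domain to a caller. The log format is arbitrary.
 */
static void __maybe_unused example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);

	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved: start %pa length 0x%zx prot 0x%x type %d\n",
			 &region->start, region->length,
			 region->prot, region->type);

	/* Frees whatever ->get_resv_regions() allocated */
	iommu_put_resv_regions(dev, &resv_regions);
}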
"dma" : "direct"); 2463 2464 ret = 0; 2465 out: 2466 mutex_unlock(&group->mutex); 2467 iommu_group_put(group); 2468 2469 return ret; 2470 } 2471 2472 /* Request that a device is direct mapped by the IOMMU */ 2473 int iommu_request_dm_for_dev(struct device *dev) 2474 { 2475 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY); 2476 } 2477 2478 /* Request that a device can't be direct mapped by the IOMMU */ 2479 int iommu_request_dma_domain_for_dev(struct device *dev) 2480 { 2481 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA); 2482 } 2483 2484 void iommu_set_default_passthrough(bool cmd_line) 2485 { 2486 if (cmd_line) 2487 iommu_set_cmd_line_dma_api(); 2488 2489 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 2490 } 2491 2492 void iommu_set_default_translated(bool cmd_line) 2493 { 2494 if (cmd_line) 2495 iommu_set_cmd_line_dma_api(); 2496 2497 iommu_def_domain_type = IOMMU_DOMAIN_DMA; 2498 } 2499 2500 bool iommu_default_passthrough(void) 2501 { 2502 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY; 2503 } 2504 EXPORT_SYMBOL_GPL(iommu_default_passthrough); 2505 2506 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2507 { 2508 const struct iommu_ops *ops = NULL; 2509 struct iommu_device *iommu; 2510 2511 spin_lock(&iommu_device_lock); 2512 list_for_each_entry(iommu, &iommu_device_list, list) 2513 if (iommu->fwnode == fwnode) { 2514 ops = iommu->ops; 2515 break; 2516 } 2517 spin_unlock(&iommu_device_lock); 2518 return ops; 2519 } 2520 2521 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, 2522 const struct iommu_ops *ops) 2523 { 2524 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2525 2526 if (fwspec) 2527 return ops == fwspec->ops ? 0 : -EINVAL; 2528 2529 if (!dev_iommu_get(dev)) 2530 return -ENOMEM; 2531 2532 /* Preallocate for the overwhelmingly common case of 1 ID */ 2533 fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); 2534 if (!fwspec) 2535 return -ENOMEM; 2536 2537 of_node_get(to_of_node(iommu_fwnode)); 2538 fwspec->iommu_fwnode = iommu_fwnode; 2539 fwspec->ops = ops; 2540 dev_iommu_fwspec_set(dev, fwspec); 2541 return 0; 2542 } 2543 EXPORT_SYMBOL_GPL(iommu_fwspec_init); 2544 2545 void iommu_fwspec_free(struct device *dev) 2546 { 2547 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2548 2549 if (fwspec) { 2550 fwnode_handle_put(fwspec->iommu_fwnode); 2551 kfree(fwspec); 2552 dev_iommu_fwspec_set(dev, NULL); 2553 } 2554 } 2555 EXPORT_SYMBOL_GPL(iommu_fwspec_free); 2556 2557 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) 2558 { 2559 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2560 int i, new_num; 2561 2562 if (!fwspec) 2563 return -EINVAL; 2564 2565 new_num = fwspec->num_ids + num_ids; 2566 if (new_num > 1) { 2567 fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), 2568 GFP_KERNEL); 2569 if (!fwspec) 2570 return -ENOMEM; 2571 2572 dev_iommu_fwspec_set(dev, fwspec); 2573 } 2574 2575 for (i = 0; i < num_ids; i++) 2576 fwspec->ids[fwspec->num_ids + i] = ids[i]; 2577 2578 fwspec->num_ids = new_num; 2579 return 0; 2580 } 2581 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); 2582 2583 /* 2584 * Per device IOMMU features. 
2585 */ 2586 bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) 2587 { 2588 const struct iommu_ops *ops = dev->bus->iommu_ops; 2589 2590 if (ops && ops->dev_has_feat) 2591 return ops->dev_has_feat(dev, feat); 2592 2593 return false; 2594 } 2595 EXPORT_SYMBOL_GPL(iommu_dev_has_feature); 2596 2597 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 2598 { 2599 const struct iommu_ops *ops = dev->bus->iommu_ops; 2600 2601 if (ops && ops->dev_enable_feat) 2602 return ops->dev_enable_feat(dev, feat); 2603 2604 return -ENODEV; 2605 } 2606 EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); 2607 2608 /* 2609 * The device drivers should do the necessary cleanups before calling this. 2610 * For example, before disabling the aux-domain feature, the device driver 2611 * should detach all aux-domains. Otherwise, this will return -EBUSY. 2612 */ 2613 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 2614 { 2615 const struct iommu_ops *ops = dev->bus->iommu_ops; 2616 2617 if (ops && ops->dev_disable_feat) 2618 return ops->dev_disable_feat(dev, feat); 2619 2620 return -EBUSY; 2621 } 2622 EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); 2623 2624 bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 2625 { 2626 const struct iommu_ops *ops = dev->bus->iommu_ops; 2627 2628 if (ops && ops->dev_feat_enabled) 2629 return ops->dev_feat_enabled(dev, feat); 2630 2631 return false; 2632 } 2633 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); 2634 2635 /* 2636 * Aux-domain specific attach/detach. 2637 * 2638 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns 2639 * true. Also, as long as domains are attached to a device through this 2640 * interface, any tries to call iommu_attach_device() should fail 2641 * (iommu_detach_device() can't fail, so we fail when trying to re-attach). 2642 * This should make us safe against a device being attached to a guest as a 2643 * whole while there are still pasid users on it (aux and sva). 2644 */ 2645 int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 2646 { 2647 int ret = -ENODEV; 2648 2649 if (domain->ops->aux_attach_dev) 2650 ret = domain->ops->aux_attach_dev(domain, dev); 2651 2652 if (!ret) 2653 trace_attach_device_to_domain(dev); 2654 2655 return ret; 2656 } 2657 EXPORT_SYMBOL_GPL(iommu_aux_attach_device); 2658 2659 void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 2660 { 2661 if (domain->ops->aux_detach_dev) { 2662 domain->ops->aux_detach_dev(domain, dev); 2663 trace_detach_device_from_domain(dev); 2664 } 2665 } 2666 EXPORT_SYMBOL_GPL(iommu_aux_detach_device); 2667 2668 int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 2669 { 2670 int ret = -ENODEV; 2671 2672 if (domain->ops->aux_get_pasid) 2673 ret = domain->ops->aux_get_pasid(domain, dev); 2674 2675 return ret; 2676 } 2677 EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); 2678 2679 /** 2680 * iommu_sva_bind_device() - Bind a process address space to a device 2681 * @dev: the device 2682 * @mm: the mm to bind, caller must hold a reference to it 2683 * 2684 * Create a bond between device and address space, allowing the device to access 2685 * the mm using the returned PASID. If a bond already exists between @device and 2686 * @mm, it is returned and an additional reference is taken. Caller must call 2687 * iommu_sva_unbind_device() to release each reference. 
/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque driver data associated with the bond
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID. If a bond already exists between
 * @dev and @mm, it is returned and an additional reference is taken. Caller
 * must call iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
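
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * binding an address space to a device that supports Shared Virtual
 * Addressing and retrieving the PASID to program into the device. The
 * iommu_sva_set_ops()/mm_exit plumbing is omitted for brevity.
 */
static int __maybe_unused example_sva_bind_mm(struct device *dev,
					      struct mm_struct *mm)
{
	struct iommu_sva *handle;
	int pasid, ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	handle = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_disable;
	}

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		ret = -ENODEV;
		goto out_unbind;
	}

	/* ... hand 'pasid' to the device so it can issue tagged DMA ... */

	return 0;

out_unbind:
	iommu_sva_unbind_device(handle);
out_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return ret;
}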