/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
static bool iommu_dma_strict __read_mostly = true;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret = -EINVAL;

	WARN_ON(dev->iommu_group);

	if (ops)
		ret = ops->add_device(dev);

	return ret;
}
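/*
 * For illustration, a minimal sketch of the registration path above as
 * seen from an IOMMU driver's probe routine.  The "foo" names are
 * hypothetical; iommu_device_set_ops() and iommu_device_register() are
 * the real API:
 *
 *	struct foo_iommu {
 *		struct iommu_device iommu;
 *		void __iomem *base;
 *	};
 *
 *	static int foo_iommu_probe(struct platform_device *pdev)
 *	{
 *		struct foo_iommu *data;
 *
 *		data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 *		if (!data)
 *			return -ENOMEM;
 *
 *		iommu_device_set_ops(&data->iommu, &foo_iommu_ops);
 *		return iommu_device_register(&data->iommu);
 *	}
 */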
void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev->iommu_group)
		ops->remove_device(dev);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}
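/*
 * The attributes built on the helpers above surface each group under
 * /sys/kernel/iommu_groups/ (the kset is created in iommu_init() below).
 * For example, inspecting group 5 from userspace might look like:
 *
 *	$ ls /sys/kernel/iommu_groups/5
 *	devices  reserved_regions  type
 *	$ cat /sys/kernel/iommu_groups/5/type
 *	DMA
 *
 * "name" only appears once a driver has called iommu_group_set_name().
 */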
/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type.  In case it overlaps with another
 * region of the same type, regions are merged.  In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			pos = pos->next;
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
			else
				pos = pos->next;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				iommu_insert_resv_region(entry, regions);
			} else {
				pos = pos->next;
			}
		}
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}
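/*
 * For illustration, any caller of iommu_get_group_resv_regions() owns
 * the resulting list and must free it, just like the sysfs show routine
 * above.  A minimal hypothetical consumer:
 *
 *	struct iommu_resv_region *region, *next;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry_safe(region, next, &resv_regions, list) {
 *		pr_info("resv start %pa length %zu type %d\n",
 *			&region->start, region->length, region->type);
 *		kfree(region);
 *	}
 */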
static IOMMU_GROUP_ATTR(name, 0444, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
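/*
 * As a hedged sketch (hypothetical driver code), the usual pattern is to
 * allocate per-group driver state once and let the group release it:
 *
 *	static void foo_group_release(void *iommu_data)
 *	{
 *		kfree(iommu_data);
 *	}
 *
 *	data = kzalloc(sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	iommu_group_set_iommudata(group, data, foo_group_release);
 *
 * Later lookups then use iommu_group_get_iommudata(group) instead of a
 * driver-private table of groups.
 */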
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
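/*
 * For illustration, the add/remove pair above is normally driven from an
 * IOMMU driver's add_device()/remove_device() callbacks.  A hedged
 * sketch for a hypothetical "foo" driver; iommu_group_get_for_dev()
 * (defined later in this file) finds or creates the group and already
 * calls iommu_group_add_device(), so only the lookup reference needs to
 * be dropped:
 *
 *	static int foo_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *
 *		group = iommu_group_get_for_dev(dev);
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);
 *		return 0;
 *	}
 *
 *	static void foo_remove_device(struct device *dev)
 *	{
 *		iommu_group_remove_device(dev);
 *	}
 */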
static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
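/*
 * For illustration, a group user (VFIO is the in-tree example) watches
 * membership changes roughly like this hypothetical sketch:
 *
 *	static int foo_group_notify(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined the watched group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &foo_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &foo_nb);
 */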
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}
/*
 * Generic device_group call-back function.  It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
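/*
 * For illustration, an IOMMU driver picks one of the helpers above in
 * its iommu_ops.  A hypothetical driver serving both PCI and platform
 * devices might wire it up like this:
 *
 *	static struct iommu_group *foo_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 *
 *	static const struct iommu_ops foo_iommu_ops = {
 *		...
 *		.device_group = foo_device_group,
 *	};
 */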
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
			if (dom) {
				dev_warn(dev,
					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA\n",
					 iommu_def_domain_type);
			}
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;

		if (dom && !iommu_dma_strict) {
			int attr = 1;

			iommu_domain_set_attr(dom,
					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
					      &attr);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	int ret = iommu_probe_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU.  We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);
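/*
 * For illustration, drivers call bus_set_iommu() once per bus type they
 * can translate, typically at the end of their own init.  A hedged
 * sketch for a hypothetical driver handling the platform bus:
 *
 *	static int __init foo_iommu_init(void)
 *	{
 *		int ret = foo_hw_init();
 *
 *		if (ret)
 *			return ret;
 *
 *		if (!iommu_present(&platform_bus_type))
 *			ret = bus_set_iommu(&platform_bus_type,
 *					    &foo_iommu_ops);
 *		return ret;
 *	}
 */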
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return 0;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
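/*
 * For illustration, an unmanaged domain is the building block for users
 * such as VFIO that want to manage their own IOVA space.  A hedged
 * sketch of the lifecycle (error handling trimmed; "dev" is some device
 * translated by the IOMMU):
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	if (iommu_attach_device(domain, dev))
 *		goto free;
 *
 *	... iommu_map()/iommu_unmap() on the domain ...
 *
 *	iommu_detach_device(domain, dev);
 * free:
 *	iommu_domain_free(domain);
 */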
/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain);

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    bool sync)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		if (sync && ops->iotlb_range_add)
			ops->iotlb_range_add(domain, iova, pgsize);

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	if (sync && ops->iotlb_sync)
		ops->iotlb_sync(domain);

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, true);
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, false);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = iommu_map(domain, iova + mapped, start, len, prot);
			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
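/*
 * For illustration, a minimal hedged sketch of mapping one page into an
 * unmanaged domain and tearing it down again ("page" is some page the
 * caller owns; the IOVA is chosen arbitrarily and assumed free in this
 * domain):
 *
 *	unsigned long iova = 0x100000;
 *	phys_addr_t paddr = page_to_phys(page);
 *	int ret;
 *
 *	ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *
 *	... device DMA to iova ...
 *
 *	if (iommu_unmap(domain, iova, PAGE_SIZE) != PAGE_SIZE)
 *		pr_warn("partial unmap\n");
 */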
/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;
	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	dev_info(dev, "Using iommu direct mapping\n");

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
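/*
 * For illustration, firmware glue (the OF/ACPI code) typically builds a
 * fwspec in two steps while parsing a device's IOMMU description.  A
 * hedged sketch, with the stream ID value made up:
 *
 *	u32 sid = 0x42;
 *	int ret;
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *	if (ret)
 *		iommu_fwspec_free(dev);
 */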
/*
 * Per device IOMMU features.
 */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_has_feat)
		return ops->dev_has_feat(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_enable_feat)
		return ops->dev_enable_feat(dev, feat);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains.  Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_disable_feat)
		return ops->dev_disable_feat(dev, feat);

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_feat_enabled)
		return ops->dev_feat_enabled(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true.  Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer passed to the driver's bind callback
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID.  If a bond already exists between
 * @dev and @mm, it is returned and an additional reference is taken.  Caller
 * must call iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device.  Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space.  The device should
 * not be issuing any more transaction for this PASID.  All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
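/*
 * For illustration, a device driver supporting Shared Virtual Addressing
 * would use the API above roughly as follows (hedged sketch; error
 * handling trimmed, and the device is assumed SVA-capable):
 *
 *	struct iommu_sva *handle;
 *	int pasid;
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		return -ENODEV;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program pasid into the device and issue DMA ...
 *
 *	iommu_sva_unbind_device(handle);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 */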