/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)	"iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
static bool iommu_dma_strict __read_mostly = true;

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]	= "direct",
	[IOMMU_RESV_RESERVED]	= "reserved",
	[IOMMU_RESV_MSI]	= "msi",
	[IOMMU_RESV_SW_MSI]	= "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);

	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
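/*
 * Example (editor's sketch, not part of the original file): how an IOMMU
 * driver might register itself with the core. "my_iommu_ops" and the probe
 * helper are hypothetical; real drivers usually also call
 * iommu_device_sysfs_add() before registering.
 */
static const struct iommu_ops my_iommu_ops; /* hypothetical driver ops */

static int __maybe_unused my_iommu_register_example(struct device *dev)
{
	static struct iommu_device my_iommu;

	iommu_device_set_ops(&my_iommu, &my_iommu_ops);
	iommu_device_set_fwnode(&my_iommu, dev_fwnode(dev));

	return iommu_device_register(&my_iommu);
}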
int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret = -EINVAL;

	WARN_ON(dev->iommu_group);

	if (ops)
		ret = ops->add_device(dev);

	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev->iommu_group)
		ops->remove_device(dev);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}
/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type. In case it overlaps with another
 * region of the same type, regions are merged. In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *region;
	phys_addr_t start = new->start;
	phys_addr_t end = new->start + new->length - 1;
	struct list_head *pos = regions->next;

	while (pos != regions) {
		struct iommu_resv_region *entry =
			list_entry(pos, struct iommu_resv_region, list);
		phys_addr_t a = entry->start;
		phys_addr_t b = entry->start + entry->length - 1;
		int type = entry->type;

		if (end < a) {
			goto insert;
		} else if (start > b) {
			pos = pos->next;
		} else if ((start >= a) && (end <= b)) {
			if (new->type == type)
				goto done;
			else
				pos = pos->next;
		} else {
			if (new->type == type) {
				phys_addr_t new_start = min(a, start);
				phys_addr_t new_end = max(b, end);

				list_del(&entry->list);
				entry->start = new_start;
				entry->length = new_end - new_start + 1;
				iommu_insert_resv_region(entry, regions);
			} else {
				pos = pos->next;
			}
		}
	}
insert:
	region = iommu_alloc_resv_region(new->start, new->length,
					 new->prot, new->type);
	if (!region)
		return -ENOMEM;

	list_add_tail(&region->list, pos);
done:
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
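/*
 * Example (editor's sketch): consuming the merged, per-group reserved
 * region list from a driver or VFIO-like user. The caller owns the
 * returned entries and must free them, as the sysfs show function below
 * also does.
 */
static void __maybe_unused resv_regions_example(struct iommu_group *group)
{
	struct iommu_resv_region *region, *next;
	LIST_HEAD(resv_regions);

	iommu_get_group_resv_regions(group, &resv_regions);

	list_for_each_entry_safe(region, next, &resv_regions, list) {
		phys_addr_t end = region->start + region->length - 1;

		pr_info("reserved: %pa..%pa (%s)\n", &region->start, &end,
			iommu_group_resv_type_string[region->type]);
		kfree(region);
	}
}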
static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};
/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the returned
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
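/*
 * Example (editor's sketch): the usual driver-side pattern built on
 * iommu_group_alloc(). Most drivers reach this through their
 * ->device_group() callback rather than open-coding it; the group name
 * used here is made up.
 */
static int __maybe_unused group_alloc_example(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_set_name(group, "example-group");
	ret = iommu_group_add_device(group, dev);

	/* Drop the allocation reference; the device now pins the group. */
	iommu_group_put(group);
	return ret;
}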
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
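/*
 * Example (editor's sketch): stashing per-group driver state with a
 * release callback so it is freed along with the group. "my_group_info"
 * is a hypothetical structure, not part of this file's API.
 */
struct my_group_info {
	int stream_id; /* hypothetical per-group state */
};

static void __maybe_unused my_group_info_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int __maybe_unused iommudata_example(struct iommu_group *group)
{
	struct my_group_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	iommu_group_set_iommudata(group, info, my_group_info_release);
	return 0;
}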
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}

	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
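/*
 * Example (editor's sketch): counting group members through the public
 * iterator, mirroring what iommu_group_device_count() does internally.
 * A non-zero return from the callback stops the walk.
 */
static int __maybe_unused count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;
}

static int __maybe_unused count_devices_example(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, count_one);
	return count;
}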
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
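/*
 * Example (editor's sketch): watching group membership through the
 * notifier interface above. The actions are the IOMMU_GROUP_NOTIFY_*
 * constants from include/linux/iommu.h; registration would look like
 * iommu_group_register_notifier(group, &example_nb).
 */
static int __maybe_unused group_notifier_example(struct notifier_block *nb,
						 unsigned long action,
						 void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_dbg(dev, "device joined its iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_nb = {
	.notifier_call = group_notifier_example,
};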
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function.  It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
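/*
 * Example (editor's sketch): a typical ->device_group() implementation in
 * a driver's iommu_ops simply dispatches on the bus type, much as several
 * in-tree drivers do.
 */
static struct iommu_group *__maybe_unused device_group_example(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	if (dev_is_fsl_mc(dev))
		return fsl_mc_device_group(dev);
	return generic_device_group(dev);
}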
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dev_warn(dev,
				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA\n",
				 iommu_def_domain_type);
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;

		if (dom && !iommu_dma_strict) {
			int attr = 1;

			iommu_domain_set_attr(dom,
					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
					      &attr);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}
static int add_iommu_group(struct device *dev, void *data)
{
	int ret = iommu_probe_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU.  We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
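/*
 * Example (editor's sketch): an IOMMU driver advertising its ops for a
 * bus type from its own init path, reusing the hypothetical
 * "my_iommu_ops" from the registration sketch near the top of the file.
 */
static int __maybe_unused my_iommu_bus_setup_example(void)
{
	if (iommu_present(&pci_bus_type))
		return 0; /* another driver got there first */

	return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
}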
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
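/*
 * Example (editor's sketch): a fault handler as installed with
 * iommu_set_fault_handler(domain, example_fault_handler, NULL).
 * Returning -ENOSYS asks the driver for its default behaviour, as
 * report_iommu_fault() further down describes.
 */
static int __maybe_unused example_fault_handler(struct iommu_domain *domain,
						struct device *dev,
						unsigned long iova,
						int flags, void *token)
{
	dev_err(dev, "unhandled iommu fault at 0x%lx (flags 0x%x)\n",
		iova, flags);

	return -ENOSYS;
}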
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return 0;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
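/*
 * Example (editor's sketch): the classic unmanaged-domain life cycle.
 * The IOVA and size are illustrative and must satisfy the alignment
 * rules that iommu_map() below enforces for the domain's page sizes.
 */
static int __maybe_unused domain_lifecycle_example(struct device *dev,
						   phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(domain, 0x1000, paddr, 0x1000,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... device DMA through the domain happens here ... */

	iommu_unmap(domain, 0x1000, 0x1000);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}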
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain);

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    bool sync)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		if (sync && ops->iotlb_range_add)
			ops->iotlb_range_add(domain, iova, pgsize);

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	if (sync && ops->iotlb_sync)
		ops->iotlb_sync(domain);

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, true);
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size)
{
	return __iommu_unmap(domain, iova, size, false);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = iommu_map(domain, iova + mapped, start, len, prot);
			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
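/*
 * Example (editor's sketch): mapping a scatterlist and handling the
 * zero-bytes-mapped failure convention of iommu_map_sg() above.
 */
static int __maybe_unused map_sg_example(struct iommu_domain *domain,
					 unsigned long iova,
					 struct scatterlist *sgl,
					 unsigned int nents)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgl, nents, IOMMU_READ);
	if (!mapped)
		return -ENOMEM; /* nothing is left mapped on failure */

	return 0;
}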
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
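/*
 * Example (editor's sketch): querying a domain's IOVA aperture through
 * the generic attribute interface handled above.
 */
static int __maybe_unused geometry_example(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;
	int ret;

	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
	if (ret)
		return ret;

	pr_info("aperture: %pad..%pad\n",
		&geo.aperture_start, &geo.aperture_end);
	return 0;
}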
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	dev_info(dev, "Using iommu direct mapping\n");

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
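/*
 * Example (editor's sketch): how firmware glue (OF/ACPI) typically builds
 * a fwspec for a master device using the two calls above. The stream ID
 * value is made up for illustration.
 */
static int __maybe_unused fwspec_example(struct device *dev,
					 struct fwnode_handle *iommu_fwnode,
					 const struct iommu_ops *ops)
{
	u32 sid = 0x42; /* hypothetical stream ID from a firmware binding */
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	return iommu_fwspec_add_ids(dev, &sid, 1);
}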