1 /* 2 * Copyright (C) 2016, Semihalf 3 * Author: Tomasz Nowicki <tn@semihalf.com> 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * This file implements early detection/parsing of I/O mapping 15 * reported to OS through firmware via I/O Remapping Table (IORT) 16 * IORT document number: ARM DEN 0049A 17 */ 18 19 #define pr_fmt(fmt) "ACPI: IORT: " fmt 20 21 #include <linux/acpi_iort.h> 22 #include <linux/iommu.h> 23 #include <linux/kernel.h> 24 #include <linux/list.h> 25 #include <linux/pci.h> 26 #include <linux/platform_device.h> 27 #include <linux/slab.h> 28 29 #define IORT_TYPE_MASK(type) (1 << (type)) 30 #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) 31 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ 32 (1 << ACPI_IORT_NODE_SMMU_V3)) 33 34 struct iort_its_msi_chip { 35 struct list_head list; 36 struct fwnode_handle *fw_node; 37 u32 translation_id; 38 }; 39 40 struct iort_fwnode { 41 struct list_head list; 42 struct acpi_iort_node *iort_node; 43 struct fwnode_handle *fwnode; 44 }; 45 static LIST_HEAD(iort_fwnode_list); 46 static DEFINE_SPINLOCK(iort_fwnode_lock); 47 48 /** 49 * iort_set_fwnode() - Create iort_fwnode and use it to register 50 * iommu data in the iort_fwnode_list 51 * 52 * @node: IORT table node associated with the IOMMU 53 * @fwnode: fwnode associated with the IORT node 54 * 55 * Returns: 0 on success 56 * <0 on failure 57 */ 58 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node, 59 struct fwnode_handle *fwnode) 60 { 61 struct iort_fwnode *np; 62 63 np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC); 64 65 if (WARN_ON(!np)) 66 return -ENOMEM; 67 68 INIT_LIST_HEAD(&np->list); 69 np->iort_node = iort_node; 70 np->fwnode = fwnode; 71 72 spin_lock(&iort_fwnode_lock); 73 list_add_tail(&np->list, &iort_fwnode_list); 74 spin_unlock(&iort_fwnode_lock); 75 76 return 0; 77 } 78 79 /** 80 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node 81 * 82 * @node: IORT table node to be looked-up 83 * 84 * Returns: fwnode_handle pointer on success, NULL on failure 85 */ 86 static inline 87 struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node) 88 { 89 struct iort_fwnode *curr; 90 struct fwnode_handle *fwnode = NULL; 91 92 spin_lock(&iort_fwnode_lock); 93 list_for_each_entry(curr, &iort_fwnode_list, list) { 94 if (curr->iort_node == node) { 95 fwnode = curr->fwnode; 96 break; 97 } 98 } 99 spin_unlock(&iort_fwnode_lock); 100 101 return fwnode; 102 } 103 104 /** 105 * iort_delete_fwnode() - Delete fwnode associated with an IORT node 106 * 107 * @node: IORT table node associated with fwnode to delete 108 */ 109 static inline void iort_delete_fwnode(struct acpi_iort_node *node) 110 { 111 struct iort_fwnode *curr, *tmp; 112 113 spin_lock(&iort_fwnode_lock); 114 list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) { 115 if (curr->iort_node == node) { 116 list_del(&curr->list); 117 kfree(curr); 118 break; 119 } 120 } 121 spin_unlock(&iort_fwnode_lock); 122 } 123 124 typedef acpi_status (*iort_find_node_callback) 125 (struct acpi_iort_node *node, void *context); 126 127 /* Root pointer to the mapped IORT table */ 128 static struct acpi_table_header *iort_table; 129 130 static LIST_HEAD(iort_msi_chip_list); 131 static DEFINE_SPINLOCK(iort_msi_chip_lock); 132 133 /** 134 * iort_register_domain_token() - register domain token and related ITS ID 135 * to the list from where we can get it back later on. 136 * @trans_id: ITS ID. 137 * @fw_node: Domain token. 138 * 139 * Returns: 0 on success, -ENOMEM if no memory when allocating list element 140 */ 141 int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node) 142 { 143 struct iort_its_msi_chip *its_msi_chip; 144 145 its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL); 146 if (!its_msi_chip) 147 return -ENOMEM; 148 149 its_msi_chip->fw_node = fw_node; 150 its_msi_chip->translation_id = trans_id; 151 152 spin_lock(&iort_msi_chip_lock); 153 list_add(&its_msi_chip->list, &iort_msi_chip_list); 154 spin_unlock(&iort_msi_chip_lock); 155 156 return 0; 157 } 158 159 /** 160 * iort_deregister_domain_token() - Deregister domain token based on ITS ID 161 * @trans_id: ITS ID. 162 * 163 * Returns: none. 164 */ 165 void iort_deregister_domain_token(int trans_id) 166 { 167 struct iort_its_msi_chip *its_msi_chip, *t; 168 169 spin_lock(&iort_msi_chip_lock); 170 list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) { 171 if (its_msi_chip->translation_id == trans_id) { 172 list_del(&its_msi_chip->list); 173 kfree(its_msi_chip); 174 break; 175 } 176 } 177 spin_unlock(&iort_msi_chip_lock); 178 } 179 180 /** 181 * iort_find_domain_token() - Find domain token based on given ITS ID 182 * @trans_id: ITS ID. 183 * 184 * Returns: domain token when find on the list, NULL otherwise 185 */ 186 struct fwnode_handle *iort_find_domain_token(int trans_id) 187 { 188 struct fwnode_handle *fw_node = NULL; 189 struct iort_its_msi_chip *its_msi_chip; 190 191 spin_lock(&iort_msi_chip_lock); 192 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) { 193 if (its_msi_chip->translation_id == trans_id) { 194 fw_node = its_msi_chip->fw_node; 195 break; 196 } 197 } 198 spin_unlock(&iort_msi_chip_lock); 199 200 return fw_node; 201 } 202 203 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type, 204 iort_find_node_callback callback, 205 void *context) 206 { 207 struct acpi_iort_node *iort_node, *iort_end; 208 struct acpi_table_iort *iort; 209 int i; 210 211 if (!iort_table) 212 return NULL; 213 214 /* Get the first IORT node */ 215 iort = (struct acpi_table_iort *)iort_table; 216 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 217 iort->node_offset); 218 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 219 iort_table->length); 220 221 for (i = 0; i < iort->node_count; i++) { 222 if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, 223 "IORT node pointer overflows, bad table!\n")) 224 return NULL; 225 226 if (iort_node->type == type && 227 ACPI_SUCCESS(callback(iort_node, context))) 228 return iort_node; 229 230 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 231 iort_node->length); 232 } 233 234 return NULL; 235 } 236 237 static acpi_status 238 iort_match_type_callback(struct acpi_iort_node *node, void *context) 239 { 240 return AE_OK; 241 } 242 243 bool iort_node_match(u8 type) 244 { 245 struct acpi_iort_node *node; 246 247 node = iort_scan_node(type, iort_match_type_callback, NULL); 248 249 return node != NULL; 250 } 251 252 static acpi_status iort_match_node_callback(struct acpi_iort_node *node, 253 void *context) 254 { 255 struct device *dev = context; 256 acpi_status status = AE_NOT_FOUND; 257 258 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { 259 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 260 struct acpi_device *adev = to_acpi_device_node(dev->fwnode); 261 struct acpi_iort_named_component *ncomp; 262 263 if (!adev) 264 goto out; 265 266 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); 267 if (ACPI_FAILURE(status)) { 268 dev_warn(dev, "Can't get device full path name\n"); 269 goto out; 270 } 271 272 ncomp = (struct acpi_iort_named_component *)node->node_data; 273 status = !strcmp(ncomp->device_name, buf.pointer) ? 274 AE_OK : AE_NOT_FOUND; 275 acpi_os_free(buf.pointer); 276 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 277 struct acpi_iort_root_complex *pci_rc; 278 struct pci_bus *bus; 279 280 bus = to_pci_bus(dev); 281 pci_rc = (struct acpi_iort_root_complex *)node->node_data; 282 283 /* 284 * It is assumed that PCI segment numbers maps one-to-one 285 * with root complexes. Each segment number can represent only 286 * one root complex. 287 */ 288 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? 289 AE_OK : AE_NOT_FOUND; 290 } 291 out: 292 return status; 293 } 294 295 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, 296 u32 *rid_out) 297 { 298 /* Single mapping does not care for input id */ 299 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 300 if (type == ACPI_IORT_NODE_NAMED_COMPONENT || 301 type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 302 *rid_out = map->output_base; 303 return 0; 304 } 305 306 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n", 307 map, type); 308 return -ENXIO; 309 } 310 311 if (rid_in < map->input_base || 312 (rid_in >= map->input_base + map->id_count)) 313 return -ENXIO; 314 315 *rid_out = map->output_base + (rid_in - map->input_base); 316 return 0; 317 } 318 319 static 320 struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, 321 u32 *id_out, int index) 322 { 323 struct acpi_iort_node *parent; 324 struct acpi_iort_id_mapping *map; 325 326 if (!node->mapping_offset || !node->mapping_count || 327 index >= node->mapping_count) 328 return NULL; 329 330 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 331 node->mapping_offset + index * sizeof(*map)); 332 333 /* Firmware bug! */ 334 if (!map->output_reference) { 335 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 336 node, node->type); 337 return NULL; 338 } 339 340 parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 341 map->output_reference); 342 343 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 344 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || 345 node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 346 *id_out = map->output_base; 347 return parent; 348 } 349 } 350 351 return NULL; 352 } 353 354 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, 355 u32 id_in, u32 *id_out, 356 u8 type_mask) 357 { 358 u32 id = id_in; 359 360 /* Parse the ID mapping tree to find specified node type */ 361 while (node) { 362 struct acpi_iort_id_mapping *map; 363 int i; 364 365 if (IORT_TYPE_MASK(node->type) & type_mask) { 366 if (id_out) 367 *id_out = id; 368 return node; 369 } 370 371 if (!node->mapping_offset || !node->mapping_count) 372 goto fail_map; 373 374 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 375 node->mapping_offset); 376 377 /* Firmware bug! */ 378 if (!map->output_reference) { 379 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 380 node, node->type); 381 goto fail_map; 382 } 383 384 /* Do the ID translation */ 385 for (i = 0; i < node->mapping_count; i++, map++) { 386 if (!iort_id_map(map, node->type, id, &id)) 387 break; 388 } 389 390 if (i == node->mapping_count) 391 goto fail_map; 392 393 node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 394 map->output_reference); 395 } 396 397 fail_map: 398 /* Map input ID to output ID unchanged on mapping failure */ 399 if (id_out) 400 *id_out = id_in; 401 402 return NULL; 403 } 404 405 static 406 struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node, 407 u32 *id_out, u8 type_mask, 408 int index) 409 { 410 struct acpi_iort_node *parent; 411 u32 id; 412 413 /* step 1: retrieve the initial dev id */ 414 parent = iort_node_get_id(node, &id, index); 415 if (!parent) 416 return NULL; 417 418 /* 419 * optional step 2: map the initial dev id if its parent is not 420 * the target type we want, map it again for the use cases such 421 * as NC (named component) -> SMMU -> ITS. If the type is matched, 422 * return the initial dev id and its parent pointer directly. 423 */ 424 if (!(IORT_TYPE_MASK(parent->type) & type_mask)) 425 parent = iort_node_map_id(parent, id, id_out, type_mask); 426 else 427 if (id_out) 428 *id_out = id; 429 430 return parent; 431 } 432 433 static struct acpi_iort_node *iort_find_dev_node(struct device *dev) 434 { 435 struct pci_bus *pbus; 436 437 if (!dev_is_pci(dev)) 438 return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 439 iort_match_node_callback, dev); 440 441 /* Find a PCI root bus */ 442 pbus = to_pci_dev(dev)->bus; 443 while (!pci_is_root_bus(pbus)) 444 pbus = pbus->parent; 445 446 return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 447 iort_match_node_callback, &pbus->dev); 448 } 449 450 /** 451 * iort_msi_map_rid() - Map a MSI requester ID for a device 452 * @dev: The device for which the mapping is to be done. 453 * @req_id: The device requester ID. 454 * 455 * Returns: mapped MSI RID on success, input requester ID otherwise 456 */ 457 u32 iort_msi_map_rid(struct device *dev, u32 req_id) 458 { 459 struct acpi_iort_node *node; 460 u32 dev_id; 461 462 node = iort_find_dev_node(dev); 463 if (!node) 464 return req_id; 465 466 iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); 467 return dev_id; 468 } 469 470 /** 471 * iort_pmsi_get_dev_id() - Get the device id for a device 472 * @dev: The device for which the mapping is to be done. 473 * @dev_id: The device ID found. 474 * 475 * Returns: 0 for successful find a dev id, -ENODEV on error 476 */ 477 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) 478 { 479 int i; 480 struct acpi_iort_node *node; 481 482 node = iort_find_dev_node(dev); 483 if (!node) 484 return -ENODEV; 485 486 for (i = 0; i < node->mapping_count; i++) { 487 if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i)) 488 return 0; 489 } 490 491 return -ENODEV; 492 } 493 494 /** 495 * iort_dev_find_its_id() - Find the ITS identifier for a device 496 * @dev: The device. 497 * @req_id: Device's requester ID 498 * @idx: Index of the ITS identifier list. 499 * @its_id: ITS identifier. 500 * 501 * Returns: 0 on success, appropriate error value otherwise 502 */ 503 static int iort_dev_find_its_id(struct device *dev, u32 req_id, 504 unsigned int idx, int *its_id) 505 { 506 struct acpi_iort_its_group *its; 507 struct acpi_iort_node *node; 508 509 node = iort_find_dev_node(dev); 510 if (!node) 511 return -ENXIO; 512 513 node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); 514 if (!node) 515 return -ENXIO; 516 517 /* Move to ITS specific data */ 518 its = (struct acpi_iort_its_group *)node->node_data; 519 if (idx > its->its_count) { 520 dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", 521 idx, its->its_count); 522 return -ENXIO; 523 } 524 525 *its_id = its->identifiers[idx]; 526 return 0; 527 } 528 529 /** 530 * iort_get_device_domain() - Find MSI domain related to a device 531 * @dev: The device. 532 * @req_id: Requester ID for the device. 533 * 534 * Returns: the MSI domain for this device, NULL otherwise 535 */ 536 struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) 537 { 538 struct fwnode_handle *handle; 539 int its_id; 540 541 if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) 542 return NULL; 543 544 handle = iort_find_domain_token(its_id); 545 if (!handle) 546 return NULL; 547 548 return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); 549 } 550 551 /** 552 * iort_get_platform_device_domain() - Find MSI domain related to a 553 * platform device 554 * @dev: the dev pointer associated with the platform device 555 * 556 * Returns: the MSI domain for this device, NULL otherwise 557 */ 558 static struct irq_domain *iort_get_platform_device_domain(struct device *dev) 559 { 560 struct acpi_iort_node *node, *msi_parent; 561 struct fwnode_handle *iort_fwnode; 562 struct acpi_iort_its_group *its; 563 int i; 564 565 /* find its associated iort node */ 566 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 567 iort_match_node_callback, dev); 568 if (!node) 569 return NULL; 570 571 /* then find its msi parent node */ 572 for (i = 0; i < node->mapping_count; i++) { 573 msi_parent = iort_node_map_platform_id(node, NULL, 574 IORT_MSI_TYPE, i); 575 if (msi_parent) 576 break; 577 } 578 579 if (!msi_parent) 580 return NULL; 581 582 /* Move to ITS specific data */ 583 its = (struct acpi_iort_its_group *)msi_parent->node_data; 584 585 iort_fwnode = iort_find_domain_token(its->identifiers[0]); 586 if (!iort_fwnode) 587 return NULL; 588 589 return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); 590 } 591 592 void acpi_configure_pmsi_domain(struct device *dev) 593 { 594 struct irq_domain *msi_domain; 595 596 msi_domain = iort_get_platform_device_domain(dev); 597 if (msi_domain) 598 dev_set_msi_domain(dev, msi_domain); 599 } 600 601 static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) 602 { 603 u32 *rid = data; 604 605 *rid = alias; 606 return 0; 607 } 608 609 static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, 610 struct fwnode_handle *fwnode, 611 const struct iommu_ops *ops) 612 { 613 int ret = iommu_fwspec_init(dev, fwnode, ops); 614 615 if (!ret) 616 ret = iommu_fwspec_add_ids(dev, &streamid, 1); 617 618 return ret; 619 } 620 621 static inline bool iort_iommu_driver_enabled(u8 type) 622 { 623 switch (type) { 624 case ACPI_IORT_NODE_SMMU_V3: 625 return IS_BUILTIN(CONFIG_ARM_SMMU_V3); 626 case ACPI_IORT_NODE_SMMU: 627 return IS_BUILTIN(CONFIG_ARM_SMMU); 628 default: 629 pr_warn("IORT node type %u does not describe an SMMU\n", type); 630 return false; 631 } 632 } 633 634 #ifdef CONFIG_IOMMU_API 635 static inline 636 const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) 637 { 638 return (fwspec && fwspec->ops) ? fwspec->ops : NULL; 639 } 640 641 static inline 642 int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) 643 { 644 int err = 0; 645 646 if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus && 647 !dev->iommu_group) 648 err = ops->add_device(dev); 649 650 return err; 651 } 652 #else 653 static inline 654 const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) 655 { return NULL; } 656 static inline 657 int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) 658 { return 0; } 659 #endif 660 661 static const struct iommu_ops *iort_iommu_xlate(struct device *dev, 662 struct acpi_iort_node *node, 663 u32 streamid) 664 { 665 const struct iommu_ops *ops = NULL; 666 int ret = -ENODEV; 667 struct fwnode_handle *iort_fwnode; 668 669 if (node) { 670 iort_fwnode = iort_get_fwnode(node); 671 if (!iort_fwnode) 672 return NULL; 673 674 ops = iommu_ops_from_fwnode(iort_fwnode); 675 /* 676 * If the ops look-up fails, this means that either 677 * the SMMU drivers have not been probed yet or that 678 * the SMMU drivers are not built in the kernel; 679 * Depending on whether the SMMU drivers are built-in 680 * in the kernel or not, defer the IOMMU configuration 681 * or just abort it. 682 */ 683 if (!ops) 684 return iort_iommu_driver_enabled(node->type) ? 685 ERR_PTR(-EPROBE_DEFER) : NULL; 686 687 ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); 688 } 689 690 return ret ? NULL : ops; 691 } 692 693 /** 694 * iort_set_dma_mask - Set-up dma mask for a device. 695 * 696 * @dev: device to configure 697 */ 698 void iort_set_dma_mask(struct device *dev) 699 { 700 /* 701 * Set default coherent_dma_mask to 32 bit. Drivers are expected to 702 * setup the correct supported mask. 703 */ 704 if (!dev->coherent_dma_mask) 705 dev->coherent_dma_mask = DMA_BIT_MASK(32); 706 707 /* 708 * Set it to coherent_dma_mask by default if the architecture 709 * code has not set it. 710 */ 711 if (!dev->dma_mask) 712 dev->dma_mask = &dev->coherent_dma_mask; 713 } 714 715 /** 716 * iort_iommu_configure - Set-up IOMMU configuration for a device. 717 * 718 * @dev: device to configure 719 * 720 * Returns: iommu_ops pointer on configuration success 721 * NULL on configuration failure 722 */ 723 const struct iommu_ops *iort_iommu_configure(struct device *dev) 724 { 725 struct acpi_iort_node *node, *parent; 726 const struct iommu_ops *ops = NULL; 727 u32 streamid = 0; 728 int err; 729 730 /* 731 * If we already translated the fwspec there 732 * is nothing left to do, return the iommu_ops. 733 */ 734 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); 735 if (ops) 736 return ops; 737 738 if (dev_is_pci(dev)) { 739 struct pci_bus *bus = to_pci_dev(dev)->bus; 740 u32 rid; 741 742 pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid, 743 &rid); 744 745 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 746 iort_match_node_callback, &bus->dev); 747 if (!node) 748 return NULL; 749 750 parent = iort_node_map_id(node, rid, &streamid, 751 IORT_IOMMU_TYPE); 752 753 ops = iort_iommu_xlate(dev, parent, streamid); 754 755 } else { 756 int i = 0; 757 758 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 759 iort_match_node_callback, dev); 760 if (!node) 761 return NULL; 762 763 parent = iort_node_map_platform_id(node, &streamid, 764 IORT_IOMMU_TYPE, i++); 765 766 while (parent) { 767 ops = iort_iommu_xlate(dev, parent, streamid); 768 if (IS_ERR_OR_NULL(ops)) 769 return ops; 770 771 parent = iort_node_map_platform_id(node, &streamid, 772 IORT_IOMMU_TYPE, 773 i++); 774 } 775 } 776 777 /* 778 * If we have reason to believe the IOMMU driver missed the initial 779 * add_device callback for dev, replay it to get things in order. 780 */ 781 err = iort_add_device_replay(ops, dev); 782 if (err) 783 ops = ERR_PTR(err); 784 785 /* Ignore all other errors apart from EPROBE_DEFER */ 786 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { 787 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); 788 ops = NULL; 789 } 790 791 return ops; 792 } 793 794 static void __init acpi_iort_register_irq(int hwirq, const char *name, 795 int trigger, 796 struct resource *res) 797 { 798 int irq = acpi_register_gsi(NULL, hwirq, trigger, 799 ACPI_ACTIVE_HIGH); 800 801 if (irq <= 0) { 802 pr_err("could not register gsi hwirq %d name [%s]\n", hwirq, 803 name); 804 return; 805 } 806 807 res->start = irq; 808 res->end = irq; 809 res->flags = IORESOURCE_IRQ; 810 res->name = name; 811 } 812 813 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) 814 { 815 struct acpi_iort_smmu_v3 *smmu; 816 /* Always present mem resource */ 817 int num_res = 1; 818 819 /* Retrieve SMMUv3 specific data */ 820 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 821 822 if (smmu->event_gsiv) 823 num_res++; 824 825 if (smmu->pri_gsiv) 826 num_res++; 827 828 if (smmu->gerr_gsiv) 829 num_res++; 830 831 if (smmu->sync_gsiv) 832 num_res++; 833 834 return num_res; 835 } 836 837 static void __init arm_smmu_v3_init_resources(struct resource *res, 838 struct acpi_iort_node *node) 839 { 840 struct acpi_iort_smmu_v3 *smmu; 841 int num_res = 0; 842 843 /* Retrieve SMMUv3 specific data */ 844 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 845 846 res[num_res].start = smmu->base_address; 847 res[num_res].end = smmu->base_address + SZ_128K - 1; 848 res[num_res].flags = IORESOURCE_MEM; 849 850 num_res++; 851 852 if (smmu->event_gsiv) 853 acpi_iort_register_irq(smmu->event_gsiv, "eventq", 854 ACPI_EDGE_SENSITIVE, 855 &res[num_res++]); 856 857 if (smmu->pri_gsiv) 858 acpi_iort_register_irq(smmu->pri_gsiv, "priq", 859 ACPI_EDGE_SENSITIVE, 860 &res[num_res++]); 861 862 if (smmu->gerr_gsiv) 863 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", 864 ACPI_EDGE_SENSITIVE, 865 &res[num_res++]); 866 867 if (smmu->sync_gsiv) 868 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", 869 ACPI_EDGE_SENSITIVE, 870 &res[num_res++]); 871 } 872 873 static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) 874 { 875 struct acpi_iort_smmu_v3 *smmu; 876 877 /* Retrieve SMMUv3 specific data */ 878 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 879 880 return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; 881 } 882 883 static int __init arm_smmu_count_resources(struct acpi_iort_node *node) 884 { 885 struct acpi_iort_smmu *smmu; 886 887 /* Retrieve SMMU specific data */ 888 smmu = (struct acpi_iort_smmu *)node->node_data; 889 890 /* 891 * Only consider the global fault interrupt and ignore the 892 * configuration access interrupt. 893 * 894 * MMIO address and global fault interrupt resources are always 895 * present so add them to the context interrupt count as a static 896 * value. 897 */ 898 return smmu->context_interrupt_count + 2; 899 } 900 901 static void __init arm_smmu_init_resources(struct resource *res, 902 struct acpi_iort_node *node) 903 { 904 struct acpi_iort_smmu *smmu; 905 int i, hw_irq, trigger, num_res = 0; 906 u64 *ctx_irq, *glb_irq; 907 908 /* Retrieve SMMU specific data */ 909 smmu = (struct acpi_iort_smmu *)node->node_data; 910 911 res[num_res].start = smmu->base_address; 912 res[num_res].end = smmu->base_address + smmu->span - 1; 913 res[num_res].flags = IORESOURCE_MEM; 914 num_res++; 915 916 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); 917 /* Global IRQs */ 918 hw_irq = IORT_IRQ_MASK(glb_irq[0]); 919 trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]); 920 921 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, 922 &res[num_res++]); 923 924 /* Context IRQs */ 925 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); 926 for (i = 0; i < smmu->context_interrupt_count; i++) { 927 hw_irq = IORT_IRQ_MASK(ctx_irq[i]); 928 trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]); 929 930 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, 931 &res[num_res++]); 932 } 933 } 934 935 static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) 936 { 937 struct acpi_iort_smmu *smmu; 938 939 /* Retrieve SMMU specific data */ 940 smmu = (struct acpi_iort_smmu *)node->node_data; 941 942 return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; 943 } 944 945 struct iort_iommu_config { 946 const char *name; 947 int (*iommu_init)(struct acpi_iort_node *node); 948 bool (*iommu_is_coherent)(struct acpi_iort_node *node); 949 int (*iommu_count_resources)(struct acpi_iort_node *node); 950 void (*iommu_init_resources)(struct resource *res, 951 struct acpi_iort_node *node); 952 }; 953 954 static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = { 955 .name = "arm-smmu-v3", 956 .iommu_is_coherent = arm_smmu_v3_is_coherent, 957 .iommu_count_resources = arm_smmu_v3_count_resources, 958 .iommu_init_resources = arm_smmu_v3_init_resources 959 }; 960 961 static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = { 962 .name = "arm-smmu", 963 .iommu_is_coherent = arm_smmu_is_coherent, 964 .iommu_count_resources = arm_smmu_count_resources, 965 .iommu_init_resources = arm_smmu_init_resources 966 }; 967 968 static __init 969 const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node) 970 { 971 switch (node->type) { 972 case ACPI_IORT_NODE_SMMU_V3: 973 return &iort_arm_smmu_v3_cfg; 974 case ACPI_IORT_NODE_SMMU: 975 return &iort_arm_smmu_cfg; 976 default: 977 return NULL; 978 } 979 } 980 981 /** 982 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU 983 * @node: Pointer to SMMU ACPI IORT node 984 * 985 * Returns: 0 on success, <0 failure 986 */ 987 static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node) 988 { 989 struct fwnode_handle *fwnode; 990 struct platform_device *pdev; 991 struct resource *r; 992 enum dev_dma_attr attr; 993 int ret, count; 994 const struct iort_iommu_config *ops = iort_get_iommu_cfg(node); 995 996 if (!ops) 997 return -ENODEV; 998 999 pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); 1000 if (!pdev) 1001 return -ENOMEM; 1002 1003 count = ops->iommu_count_resources(node); 1004 1005 r = kcalloc(count, sizeof(*r), GFP_KERNEL); 1006 if (!r) { 1007 ret = -ENOMEM; 1008 goto dev_put; 1009 } 1010 1011 ops->iommu_init_resources(r, node); 1012 1013 ret = platform_device_add_resources(pdev, r, count); 1014 /* 1015 * Resources are duplicated in platform_device_add_resources, 1016 * free their allocated memory 1017 */ 1018 kfree(r); 1019 1020 if (ret) 1021 goto dev_put; 1022 1023 /* 1024 * Add a copy of IORT node pointer to platform_data to 1025 * be used to retrieve IORT data information. 1026 */ 1027 ret = platform_device_add_data(pdev, &node, sizeof(node)); 1028 if (ret) 1029 goto dev_put; 1030 1031 /* 1032 * We expect the dma masks to be equivalent for 1033 * all SMMUs set-ups 1034 */ 1035 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 1036 1037 fwnode = iort_get_fwnode(node); 1038 1039 if (!fwnode) { 1040 ret = -ENODEV; 1041 goto dev_put; 1042 } 1043 1044 pdev->dev.fwnode = fwnode; 1045 1046 attr = ops->iommu_is_coherent(node) ? 1047 DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; 1048 1049 /* Configure DMA for the page table walker */ 1050 acpi_dma_configure(&pdev->dev, attr); 1051 1052 ret = platform_device_add(pdev); 1053 if (ret) 1054 goto dma_deconfigure; 1055 1056 return 0; 1057 1058 dma_deconfigure: 1059 acpi_dma_deconfigure(&pdev->dev); 1060 dev_put: 1061 platform_device_put(pdev); 1062 1063 return ret; 1064 } 1065 1066 static void __init iort_init_platform_devices(void) 1067 { 1068 struct acpi_iort_node *iort_node, *iort_end; 1069 struct acpi_table_iort *iort; 1070 struct fwnode_handle *fwnode; 1071 int i, ret; 1072 1073 /* 1074 * iort_table and iort both point to the start of IORT table, but 1075 * have different struct types 1076 */ 1077 iort = (struct acpi_table_iort *)iort_table; 1078 1079 /* Get the first IORT node */ 1080 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1081 iort->node_offset); 1082 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1083 iort_table->length); 1084 1085 for (i = 0; i < iort->node_count; i++) { 1086 if (iort_node >= iort_end) { 1087 pr_err("iort node pointer overflows, bad table\n"); 1088 return; 1089 } 1090 1091 if ((iort_node->type == ACPI_IORT_NODE_SMMU) || 1092 (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { 1093 1094 fwnode = acpi_alloc_fwnode_static(); 1095 if (!fwnode) 1096 return; 1097 1098 iort_set_fwnode(iort_node, fwnode); 1099 1100 ret = iort_add_smmu_platform_device(iort_node); 1101 if (ret) { 1102 iort_delete_fwnode(iort_node); 1103 acpi_free_fwnode_static(fwnode); 1104 return; 1105 } 1106 } 1107 1108 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 1109 iort_node->length); 1110 } 1111 } 1112 1113 void __init acpi_iort_init(void) 1114 { 1115 acpi_status status; 1116 1117 status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); 1118 if (ACPI_FAILURE(status)) { 1119 if (status != AE_NOT_FOUND) { 1120 const char *msg = acpi_format_exception(status); 1121 1122 pr_err("Failed to get table, %s\n", msg); 1123 } 1124 1125 return; 1126 } 1127 1128 iort_init_platform_devices(); 1129 } 1130