1 /* 2 * Copyright (C) 2016, Semihalf 3 * Author: Tomasz Nowicki <tn@semihalf.com> 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * This file implements early detection/parsing of I/O mapping 15 * reported to OS through firmware via I/O Remapping Table (IORT) 16 * IORT document number: ARM DEN 0049A 17 */ 18 19 #define pr_fmt(fmt) "ACPI: IORT: " fmt 20 21 #include <linux/acpi_iort.h> 22 #include <linux/iommu.h> 23 #include <linux/kernel.h> 24 #include <linux/list.h> 25 #include <linux/pci.h> 26 #include <linux/platform_device.h> 27 #include <linux/slab.h> 28 29 #define IORT_TYPE_MASK(type) (1 << (type)) 30 #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) 31 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ 32 (1 << ACPI_IORT_NODE_SMMU_V3)) 33 34 /* Until ACPICA headers cover IORT rev. C */ 35 #ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 36 #define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 37 #endif 38 39 struct iort_its_msi_chip { 40 struct list_head list; 41 struct fwnode_handle *fw_node; 42 u32 translation_id; 43 }; 44 45 struct iort_fwnode { 46 struct list_head list; 47 struct acpi_iort_node *iort_node; 48 struct fwnode_handle *fwnode; 49 }; 50 static LIST_HEAD(iort_fwnode_list); 51 static DEFINE_SPINLOCK(iort_fwnode_lock); 52 53 /** 54 * iort_set_fwnode() - Create iort_fwnode and use it to register 55 * iommu data in the iort_fwnode_list 56 * 57 * @node: IORT table node associated with the IOMMU 58 * @fwnode: fwnode associated with the IORT node 59 * 60 * Returns: 0 on success 61 * <0 on failure 62 */ 63 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node, 64 struct fwnode_handle *fwnode) 65 { 66 struct iort_fwnode *np; 67 68 np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC); 69 70 if (WARN_ON(!np)) 71 return -ENOMEM; 72 73 INIT_LIST_HEAD(&np->list); 74 np->iort_node = iort_node; 75 np->fwnode = fwnode; 76 77 spin_lock(&iort_fwnode_lock); 78 list_add_tail(&np->list, &iort_fwnode_list); 79 spin_unlock(&iort_fwnode_lock); 80 81 return 0; 82 } 83 84 /** 85 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node 86 * 87 * @node: IORT table node to be looked-up 88 * 89 * Returns: fwnode_handle pointer on success, NULL on failure 90 */ 91 static inline 92 struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node) 93 { 94 struct iort_fwnode *curr; 95 struct fwnode_handle *fwnode = NULL; 96 97 spin_lock(&iort_fwnode_lock); 98 list_for_each_entry(curr, &iort_fwnode_list, list) { 99 if (curr->iort_node == node) { 100 fwnode = curr->fwnode; 101 break; 102 } 103 } 104 spin_unlock(&iort_fwnode_lock); 105 106 return fwnode; 107 } 108 109 /** 110 * iort_delete_fwnode() - Delete fwnode associated with an IORT node 111 * 112 * @node: IORT table node associated with fwnode to delete 113 */ 114 static inline void iort_delete_fwnode(struct acpi_iort_node *node) 115 { 116 struct iort_fwnode *curr, *tmp; 117 118 spin_lock(&iort_fwnode_lock); 119 list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) { 120 if (curr->iort_node == node) { 121 list_del(&curr->list); 122 kfree(curr); 123 break; 124 } 125 } 126 spin_unlock(&iort_fwnode_lock); 127 } 128 129 typedef acpi_status (*iort_find_node_callback) 130 (struct acpi_iort_node *node, void *context); 131 132 /* Root pointer to the mapped IORT table */ 133 static struct acpi_table_header *iort_table; 134 135 static LIST_HEAD(iort_msi_chip_list); 136 static DEFINE_SPINLOCK(iort_msi_chip_lock); 137 138 /** 139 * iort_register_domain_token() - register domain token and related ITS ID 140 * to the list from where we can get it back later on. 141 * @trans_id: ITS ID. 142 * @fw_node: Domain token. 143 * 144 * Returns: 0 on success, -ENOMEM if no memory when allocating list element 145 */ 146 int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node) 147 { 148 struct iort_its_msi_chip *its_msi_chip; 149 150 its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL); 151 if (!its_msi_chip) 152 return -ENOMEM; 153 154 its_msi_chip->fw_node = fw_node; 155 its_msi_chip->translation_id = trans_id; 156 157 spin_lock(&iort_msi_chip_lock); 158 list_add(&its_msi_chip->list, &iort_msi_chip_list); 159 spin_unlock(&iort_msi_chip_lock); 160 161 return 0; 162 } 163 164 /** 165 * iort_deregister_domain_token() - Deregister domain token based on ITS ID 166 * @trans_id: ITS ID. 167 * 168 * Returns: none. 169 */ 170 void iort_deregister_domain_token(int trans_id) 171 { 172 struct iort_its_msi_chip *its_msi_chip, *t; 173 174 spin_lock(&iort_msi_chip_lock); 175 list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) { 176 if (its_msi_chip->translation_id == trans_id) { 177 list_del(&its_msi_chip->list); 178 kfree(its_msi_chip); 179 break; 180 } 181 } 182 spin_unlock(&iort_msi_chip_lock); 183 } 184 185 /** 186 * iort_find_domain_token() - Find domain token based on given ITS ID 187 * @trans_id: ITS ID. 188 * 189 * Returns: domain token when find on the list, NULL otherwise 190 */ 191 struct fwnode_handle *iort_find_domain_token(int trans_id) 192 { 193 struct fwnode_handle *fw_node = NULL; 194 struct iort_its_msi_chip *its_msi_chip; 195 196 spin_lock(&iort_msi_chip_lock); 197 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) { 198 if (its_msi_chip->translation_id == trans_id) { 199 fw_node = its_msi_chip->fw_node; 200 break; 201 } 202 } 203 spin_unlock(&iort_msi_chip_lock); 204 205 return fw_node; 206 } 207 208 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type, 209 iort_find_node_callback callback, 210 void *context) 211 { 212 struct acpi_iort_node *iort_node, *iort_end; 213 struct acpi_table_iort *iort; 214 int i; 215 216 if (!iort_table) 217 return NULL; 218 219 /* Get the first IORT node */ 220 iort = (struct acpi_table_iort *)iort_table; 221 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 222 iort->node_offset); 223 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 224 iort_table->length); 225 226 for (i = 0; i < iort->node_count; i++) { 227 if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, 228 "IORT node pointer overflows, bad table!\n")) 229 return NULL; 230 231 if (iort_node->type == type && 232 ACPI_SUCCESS(callback(iort_node, context))) 233 return iort_node; 234 235 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 236 iort_node->length); 237 } 238 239 return NULL; 240 } 241 242 static acpi_status iort_match_node_callback(struct acpi_iort_node *node, 243 void *context) 244 { 245 struct device *dev = context; 246 acpi_status status = AE_NOT_FOUND; 247 248 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { 249 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 250 struct acpi_device *adev = to_acpi_device_node(dev->fwnode); 251 struct acpi_iort_named_component *ncomp; 252 253 if (!adev) 254 goto out; 255 256 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); 257 if (ACPI_FAILURE(status)) { 258 dev_warn(dev, "Can't get device full path name\n"); 259 goto out; 260 } 261 262 ncomp = (struct acpi_iort_named_component *)node->node_data; 263 status = !strcmp(ncomp->device_name, buf.pointer) ? 264 AE_OK : AE_NOT_FOUND; 265 acpi_os_free(buf.pointer); 266 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 267 struct acpi_iort_root_complex *pci_rc; 268 struct pci_bus *bus; 269 270 bus = to_pci_bus(dev); 271 pci_rc = (struct acpi_iort_root_complex *)node->node_data; 272 273 /* 274 * It is assumed that PCI segment numbers maps one-to-one 275 * with root complexes. Each segment number can represent only 276 * one root complex. 277 */ 278 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? 279 AE_OK : AE_NOT_FOUND; 280 } 281 out: 282 return status; 283 } 284 285 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, 286 u32 *rid_out) 287 { 288 /* Single mapping does not care for input id */ 289 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 290 if (type == ACPI_IORT_NODE_NAMED_COMPONENT || 291 type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 292 *rid_out = map->output_base; 293 return 0; 294 } 295 296 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n", 297 map, type); 298 return -ENXIO; 299 } 300 301 if (rid_in < map->input_base || 302 (rid_in >= map->input_base + map->id_count)) 303 return -ENXIO; 304 305 *rid_out = map->output_base + (rid_in - map->input_base); 306 return 0; 307 } 308 309 static 310 struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, 311 u32 *id_out, int index) 312 { 313 struct acpi_iort_node *parent; 314 struct acpi_iort_id_mapping *map; 315 316 if (!node->mapping_offset || !node->mapping_count || 317 index >= node->mapping_count) 318 return NULL; 319 320 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 321 node->mapping_offset + index * sizeof(*map)); 322 323 /* Firmware bug! */ 324 if (!map->output_reference) { 325 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 326 node, node->type); 327 return NULL; 328 } 329 330 parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 331 map->output_reference); 332 333 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 334 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || 335 node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 336 *id_out = map->output_base; 337 return parent; 338 } 339 } 340 341 return NULL; 342 } 343 344 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, 345 u32 id_in, u32 *id_out, 346 u8 type_mask) 347 { 348 u32 id = id_in; 349 350 /* Parse the ID mapping tree to find specified node type */ 351 while (node) { 352 struct acpi_iort_id_mapping *map; 353 int i; 354 355 if (IORT_TYPE_MASK(node->type) & type_mask) { 356 if (id_out) 357 *id_out = id; 358 return node; 359 } 360 361 if (!node->mapping_offset || !node->mapping_count) 362 goto fail_map; 363 364 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 365 node->mapping_offset); 366 367 /* Firmware bug! */ 368 if (!map->output_reference) { 369 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 370 node, node->type); 371 goto fail_map; 372 } 373 374 /* Do the ID translation */ 375 for (i = 0; i < node->mapping_count; i++, map++) { 376 if (!iort_id_map(map, node->type, id, &id)) 377 break; 378 } 379 380 if (i == node->mapping_count) 381 goto fail_map; 382 383 node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 384 map->output_reference); 385 } 386 387 fail_map: 388 /* Map input ID to output ID unchanged on mapping failure */ 389 if (id_out) 390 *id_out = id_in; 391 392 return NULL; 393 } 394 395 static 396 struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node, 397 u32 *id_out, u8 type_mask, 398 int index) 399 { 400 struct acpi_iort_node *parent; 401 u32 id; 402 403 /* step 1: retrieve the initial dev id */ 404 parent = iort_node_get_id(node, &id, index); 405 if (!parent) 406 return NULL; 407 408 /* 409 * optional step 2: map the initial dev id if its parent is not 410 * the target type we want, map it again for the use cases such 411 * as NC (named component) -> SMMU -> ITS. If the type is matched, 412 * return the initial dev id and its parent pointer directly. 413 */ 414 if (!(IORT_TYPE_MASK(parent->type) & type_mask)) 415 parent = iort_node_map_id(parent, id, id_out, type_mask); 416 else 417 if (id_out) 418 *id_out = id; 419 420 return parent; 421 } 422 423 static struct acpi_iort_node *iort_find_dev_node(struct device *dev) 424 { 425 struct pci_bus *pbus; 426 427 if (!dev_is_pci(dev)) 428 return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 429 iort_match_node_callback, dev); 430 431 /* Find a PCI root bus */ 432 pbus = to_pci_dev(dev)->bus; 433 while (!pci_is_root_bus(pbus)) 434 pbus = pbus->parent; 435 436 return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 437 iort_match_node_callback, &pbus->dev); 438 } 439 440 /** 441 * iort_msi_map_rid() - Map a MSI requester ID for a device 442 * @dev: The device for which the mapping is to be done. 443 * @req_id: The device requester ID. 444 * 445 * Returns: mapped MSI RID on success, input requester ID otherwise 446 */ 447 u32 iort_msi_map_rid(struct device *dev, u32 req_id) 448 { 449 struct acpi_iort_node *node; 450 u32 dev_id; 451 452 node = iort_find_dev_node(dev); 453 if (!node) 454 return req_id; 455 456 iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); 457 return dev_id; 458 } 459 460 /** 461 * iort_pmsi_get_dev_id() - Get the device id for a device 462 * @dev: The device for which the mapping is to be done. 463 * @dev_id: The device ID found. 464 * 465 * Returns: 0 for successful find a dev id, -ENODEV on error 466 */ 467 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) 468 { 469 int i; 470 struct acpi_iort_node *node; 471 472 node = iort_find_dev_node(dev); 473 if (!node) 474 return -ENODEV; 475 476 for (i = 0; i < node->mapping_count; i++) { 477 if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i)) 478 return 0; 479 } 480 481 return -ENODEV; 482 } 483 484 /** 485 * iort_dev_find_its_id() - Find the ITS identifier for a device 486 * @dev: The device. 487 * @req_id: Device's requester ID 488 * @idx: Index of the ITS identifier list. 489 * @its_id: ITS identifier. 490 * 491 * Returns: 0 on success, appropriate error value otherwise 492 */ 493 static int iort_dev_find_its_id(struct device *dev, u32 req_id, 494 unsigned int idx, int *its_id) 495 { 496 struct acpi_iort_its_group *its; 497 struct acpi_iort_node *node; 498 499 node = iort_find_dev_node(dev); 500 if (!node) 501 return -ENXIO; 502 503 node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); 504 if (!node) 505 return -ENXIO; 506 507 /* Move to ITS specific data */ 508 its = (struct acpi_iort_its_group *)node->node_data; 509 if (idx > its->its_count) { 510 dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", 511 idx, its->its_count); 512 return -ENXIO; 513 } 514 515 *its_id = its->identifiers[idx]; 516 return 0; 517 } 518 519 /** 520 * iort_get_device_domain() - Find MSI domain related to a device 521 * @dev: The device. 522 * @req_id: Requester ID for the device. 523 * 524 * Returns: the MSI domain for this device, NULL otherwise 525 */ 526 struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) 527 { 528 struct fwnode_handle *handle; 529 int its_id; 530 531 if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) 532 return NULL; 533 534 handle = iort_find_domain_token(its_id); 535 if (!handle) 536 return NULL; 537 538 return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); 539 } 540 541 /** 542 * iort_get_platform_device_domain() - Find MSI domain related to a 543 * platform device 544 * @dev: the dev pointer associated with the platform device 545 * 546 * Returns: the MSI domain for this device, NULL otherwise 547 */ 548 static struct irq_domain *iort_get_platform_device_domain(struct device *dev) 549 { 550 struct acpi_iort_node *node, *msi_parent; 551 struct fwnode_handle *iort_fwnode; 552 struct acpi_iort_its_group *its; 553 int i; 554 555 /* find its associated iort node */ 556 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 557 iort_match_node_callback, dev); 558 if (!node) 559 return NULL; 560 561 /* then find its msi parent node */ 562 for (i = 0; i < node->mapping_count; i++) { 563 msi_parent = iort_node_map_platform_id(node, NULL, 564 IORT_MSI_TYPE, i); 565 if (msi_parent) 566 break; 567 } 568 569 if (!msi_parent) 570 return NULL; 571 572 /* Move to ITS specific data */ 573 its = (struct acpi_iort_its_group *)msi_parent->node_data; 574 575 iort_fwnode = iort_find_domain_token(its->identifiers[0]); 576 if (!iort_fwnode) 577 return NULL; 578 579 return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); 580 } 581 582 void acpi_configure_pmsi_domain(struct device *dev) 583 { 584 struct irq_domain *msi_domain; 585 586 msi_domain = iort_get_platform_device_domain(dev); 587 if (msi_domain) 588 dev_set_msi_domain(dev, msi_domain); 589 } 590 591 static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) 592 { 593 u32 *rid = data; 594 595 *rid = alias; 596 return 0; 597 } 598 599 static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, 600 struct fwnode_handle *fwnode, 601 const struct iommu_ops *ops) 602 { 603 int ret = iommu_fwspec_init(dev, fwnode, ops); 604 605 if (!ret) 606 ret = iommu_fwspec_add_ids(dev, &streamid, 1); 607 608 return ret; 609 } 610 611 static inline bool iort_iommu_driver_enabled(u8 type) 612 { 613 switch (type) { 614 case ACPI_IORT_NODE_SMMU_V3: 615 return IS_BUILTIN(CONFIG_ARM_SMMU_V3); 616 case ACPI_IORT_NODE_SMMU: 617 return IS_BUILTIN(CONFIG_ARM_SMMU); 618 default: 619 pr_warn("IORT node type %u does not describe an SMMU\n", type); 620 return false; 621 } 622 } 623 624 #ifdef CONFIG_IOMMU_API 625 static inline 626 const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) 627 { 628 return (fwspec && fwspec->ops) ? fwspec->ops : NULL; 629 } 630 631 static inline 632 int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) 633 { 634 int err = 0; 635 636 if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus && 637 !dev->iommu_group) 638 err = ops->add_device(dev); 639 640 return err; 641 } 642 #else 643 static inline 644 const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) 645 { return NULL; } 646 static inline 647 int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) 648 { return 0; } 649 #endif 650 651 static const struct iommu_ops *iort_iommu_xlate(struct device *dev, 652 struct acpi_iort_node *node, 653 u32 streamid) 654 { 655 const struct iommu_ops *ops = NULL; 656 int ret = -ENODEV; 657 struct fwnode_handle *iort_fwnode; 658 659 if (node) { 660 iort_fwnode = iort_get_fwnode(node); 661 if (!iort_fwnode) 662 return NULL; 663 664 ops = iommu_ops_from_fwnode(iort_fwnode); 665 /* 666 * If the ops look-up fails, this means that either 667 * the SMMU drivers have not been probed yet or that 668 * the SMMU drivers are not built in the kernel; 669 * Depending on whether the SMMU drivers are built-in 670 * in the kernel or not, defer the IOMMU configuration 671 * or just abort it. 672 */ 673 if (!ops) 674 return iort_iommu_driver_enabled(node->type) ? 675 ERR_PTR(-EPROBE_DEFER) : NULL; 676 677 ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); 678 } 679 680 return ret ? NULL : ops; 681 } 682 683 /** 684 * iort_set_dma_mask - Set-up dma mask for a device. 685 * 686 * @dev: device to configure 687 */ 688 void iort_set_dma_mask(struct device *dev) 689 { 690 /* 691 * Set default coherent_dma_mask to 32 bit. Drivers are expected to 692 * setup the correct supported mask. 693 */ 694 if (!dev->coherent_dma_mask) 695 dev->coherent_dma_mask = DMA_BIT_MASK(32); 696 697 /* 698 * Set it to coherent_dma_mask by default if the architecture 699 * code has not set it. 700 */ 701 if (!dev->dma_mask) 702 dev->dma_mask = &dev->coherent_dma_mask; 703 } 704 705 /** 706 * iort_iommu_configure - Set-up IOMMU configuration for a device. 707 * 708 * @dev: device to configure 709 * 710 * Returns: iommu_ops pointer on configuration success 711 * NULL on configuration failure 712 */ 713 const struct iommu_ops *iort_iommu_configure(struct device *dev) 714 { 715 struct acpi_iort_node *node, *parent; 716 const struct iommu_ops *ops = NULL; 717 u32 streamid = 0; 718 int err; 719 720 /* 721 * If we already translated the fwspec there 722 * is nothing left to do, return the iommu_ops. 723 */ 724 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); 725 if (ops) 726 return ops; 727 728 if (dev_is_pci(dev)) { 729 struct pci_bus *bus = to_pci_dev(dev)->bus; 730 u32 rid; 731 732 pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid, 733 &rid); 734 735 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 736 iort_match_node_callback, &bus->dev); 737 if (!node) 738 return NULL; 739 740 parent = iort_node_map_id(node, rid, &streamid, 741 IORT_IOMMU_TYPE); 742 743 ops = iort_iommu_xlate(dev, parent, streamid); 744 745 } else { 746 int i = 0; 747 748 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 749 iort_match_node_callback, dev); 750 if (!node) 751 return NULL; 752 753 parent = iort_node_map_platform_id(node, &streamid, 754 IORT_IOMMU_TYPE, i++); 755 756 while (parent) { 757 ops = iort_iommu_xlate(dev, parent, streamid); 758 if (IS_ERR_OR_NULL(ops)) 759 return ops; 760 761 parent = iort_node_map_platform_id(node, &streamid, 762 IORT_IOMMU_TYPE, 763 i++); 764 } 765 } 766 767 /* 768 * If we have reason to believe the IOMMU driver missed the initial 769 * add_device callback for dev, replay it to get things in order. 770 */ 771 err = iort_add_device_replay(ops, dev); 772 if (err) 773 ops = ERR_PTR(err); 774 775 /* Ignore all other errors apart from EPROBE_DEFER */ 776 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { 777 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); 778 ops = NULL; 779 } 780 781 return ops; 782 } 783 784 static void __init acpi_iort_register_irq(int hwirq, const char *name, 785 int trigger, 786 struct resource *res) 787 { 788 int irq = acpi_register_gsi(NULL, hwirq, trigger, 789 ACPI_ACTIVE_HIGH); 790 791 if (irq <= 0) { 792 pr_err("could not register gsi hwirq %d name [%s]\n", hwirq, 793 name); 794 return; 795 } 796 797 res->start = irq; 798 res->end = irq; 799 res->flags = IORESOURCE_IRQ; 800 res->name = name; 801 } 802 803 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) 804 { 805 struct acpi_iort_smmu_v3 *smmu; 806 /* Always present mem resource */ 807 int num_res = 1; 808 809 /* Retrieve SMMUv3 specific data */ 810 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 811 812 if (smmu->event_gsiv) 813 num_res++; 814 815 if (smmu->pri_gsiv) 816 num_res++; 817 818 if (smmu->gerr_gsiv) 819 num_res++; 820 821 if (smmu->sync_gsiv) 822 num_res++; 823 824 return num_res; 825 } 826 827 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) 828 { 829 /* 830 * Cavium ThunderX2 implementation doesn't not support unique 831 * irq line. Use single irq line for all the SMMUv3 interrupts. 832 */ 833 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) 834 return false; 835 836 /* 837 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking 838 * SPI numbers here. 839 */ 840 return smmu->event_gsiv == smmu->pri_gsiv && 841 smmu->event_gsiv == smmu->gerr_gsiv && 842 smmu->event_gsiv == smmu->sync_gsiv; 843 } 844 845 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) 846 { 847 /* 848 * Override the size, for Cavium ThunderX2 implementation 849 * which doesn't support the page 1 SMMU register space. 850 */ 851 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) 852 return SZ_64K; 853 854 return SZ_128K; 855 } 856 857 static void __init arm_smmu_v3_init_resources(struct resource *res, 858 struct acpi_iort_node *node) 859 { 860 struct acpi_iort_smmu_v3 *smmu; 861 int num_res = 0; 862 863 /* Retrieve SMMUv3 specific data */ 864 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 865 866 res[num_res].start = smmu->base_address; 867 res[num_res].end = smmu->base_address + 868 arm_smmu_v3_resource_size(smmu) - 1; 869 res[num_res].flags = IORESOURCE_MEM; 870 871 num_res++; 872 if (arm_smmu_v3_is_combined_irq(smmu)) { 873 if (smmu->event_gsiv) 874 acpi_iort_register_irq(smmu->event_gsiv, "combined", 875 ACPI_EDGE_SENSITIVE, 876 &res[num_res++]); 877 } else { 878 879 if (smmu->event_gsiv) 880 acpi_iort_register_irq(smmu->event_gsiv, "eventq", 881 ACPI_EDGE_SENSITIVE, 882 &res[num_res++]); 883 884 if (smmu->pri_gsiv) 885 acpi_iort_register_irq(smmu->pri_gsiv, "priq", 886 ACPI_EDGE_SENSITIVE, 887 &res[num_res++]); 888 889 if (smmu->gerr_gsiv) 890 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", 891 ACPI_EDGE_SENSITIVE, 892 &res[num_res++]); 893 894 if (smmu->sync_gsiv) 895 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", 896 ACPI_EDGE_SENSITIVE, 897 &res[num_res++]); 898 } 899 } 900 901 static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) 902 { 903 struct acpi_iort_smmu_v3 *smmu; 904 905 /* Retrieve SMMUv3 specific data */ 906 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 907 908 return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE; 909 } 910 911 static int __init arm_smmu_count_resources(struct acpi_iort_node *node) 912 { 913 struct acpi_iort_smmu *smmu; 914 915 /* Retrieve SMMU specific data */ 916 smmu = (struct acpi_iort_smmu *)node->node_data; 917 918 /* 919 * Only consider the global fault interrupt and ignore the 920 * configuration access interrupt. 921 * 922 * MMIO address and global fault interrupt resources are always 923 * present so add them to the context interrupt count as a static 924 * value. 925 */ 926 return smmu->context_interrupt_count + 2; 927 } 928 929 static void __init arm_smmu_init_resources(struct resource *res, 930 struct acpi_iort_node *node) 931 { 932 struct acpi_iort_smmu *smmu; 933 int i, hw_irq, trigger, num_res = 0; 934 u64 *ctx_irq, *glb_irq; 935 936 /* Retrieve SMMU specific data */ 937 smmu = (struct acpi_iort_smmu *)node->node_data; 938 939 res[num_res].start = smmu->base_address; 940 res[num_res].end = smmu->base_address + smmu->span - 1; 941 res[num_res].flags = IORESOURCE_MEM; 942 num_res++; 943 944 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); 945 /* Global IRQs */ 946 hw_irq = IORT_IRQ_MASK(glb_irq[0]); 947 trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]); 948 949 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, 950 &res[num_res++]); 951 952 /* Context IRQs */ 953 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); 954 for (i = 0; i < smmu->context_interrupt_count; i++) { 955 hw_irq = IORT_IRQ_MASK(ctx_irq[i]); 956 trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]); 957 958 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, 959 &res[num_res++]); 960 } 961 } 962 963 static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node) 964 { 965 struct acpi_iort_smmu *smmu; 966 967 /* Retrieve SMMU specific data */ 968 smmu = (struct acpi_iort_smmu *)node->node_data; 969 970 return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK; 971 } 972 973 struct iort_iommu_config { 974 const char *name; 975 int (*iommu_init)(struct acpi_iort_node *node); 976 bool (*iommu_is_coherent)(struct acpi_iort_node *node); 977 int (*iommu_count_resources)(struct acpi_iort_node *node); 978 void (*iommu_init_resources)(struct resource *res, 979 struct acpi_iort_node *node); 980 }; 981 982 static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = { 983 .name = "arm-smmu-v3", 984 .iommu_is_coherent = arm_smmu_v3_is_coherent, 985 .iommu_count_resources = arm_smmu_v3_count_resources, 986 .iommu_init_resources = arm_smmu_v3_init_resources 987 }; 988 989 static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = { 990 .name = "arm-smmu", 991 .iommu_is_coherent = arm_smmu_is_coherent, 992 .iommu_count_resources = arm_smmu_count_resources, 993 .iommu_init_resources = arm_smmu_init_resources 994 }; 995 996 static __init 997 const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node) 998 { 999 switch (node->type) { 1000 case ACPI_IORT_NODE_SMMU_V3: 1001 return &iort_arm_smmu_v3_cfg; 1002 case ACPI_IORT_NODE_SMMU: 1003 return &iort_arm_smmu_cfg; 1004 default: 1005 return NULL; 1006 } 1007 } 1008 1009 /** 1010 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU 1011 * @node: Pointer to SMMU ACPI IORT node 1012 * 1013 * Returns: 0 on success, <0 failure 1014 */ 1015 static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node) 1016 { 1017 struct fwnode_handle *fwnode; 1018 struct platform_device *pdev; 1019 struct resource *r; 1020 enum dev_dma_attr attr; 1021 int ret, count; 1022 const struct iort_iommu_config *ops = iort_get_iommu_cfg(node); 1023 1024 if (!ops) 1025 return -ENODEV; 1026 1027 pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); 1028 if (!pdev) 1029 return -ENOMEM; 1030 1031 count = ops->iommu_count_resources(node); 1032 1033 r = kcalloc(count, sizeof(*r), GFP_KERNEL); 1034 if (!r) { 1035 ret = -ENOMEM; 1036 goto dev_put; 1037 } 1038 1039 ops->iommu_init_resources(r, node); 1040 1041 ret = platform_device_add_resources(pdev, r, count); 1042 /* 1043 * Resources are duplicated in platform_device_add_resources, 1044 * free their allocated memory 1045 */ 1046 kfree(r); 1047 1048 if (ret) 1049 goto dev_put; 1050 1051 /* 1052 * Add a copy of IORT node pointer to platform_data to 1053 * be used to retrieve IORT data information. 1054 */ 1055 ret = platform_device_add_data(pdev, &node, sizeof(node)); 1056 if (ret) 1057 goto dev_put; 1058 1059 /* 1060 * We expect the dma masks to be equivalent for 1061 * all SMMUs set-ups 1062 */ 1063 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 1064 1065 fwnode = iort_get_fwnode(node); 1066 1067 if (!fwnode) { 1068 ret = -ENODEV; 1069 goto dev_put; 1070 } 1071 1072 pdev->dev.fwnode = fwnode; 1073 1074 attr = ops->iommu_is_coherent(node) ? 1075 DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; 1076 1077 /* Configure DMA for the page table walker */ 1078 acpi_dma_configure(&pdev->dev, attr); 1079 1080 ret = platform_device_add(pdev); 1081 if (ret) 1082 goto dma_deconfigure; 1083 1084 return 0; 1085 1086 dma_deconfigure: 1087 acpi_dma_deconfigure(&pdev->dev); 1088 dev_put: 1089 platform_device_put(pdev); 1090 1091 return ret; 1092 } 1093 1094 static void __init iort_init_platform_devices(void) 1095 { 1096 struct acpi_iort_node *iort_node, *iort_end; 1097 struct acpi_table_iort *iort; 1098 struct fwnode_handle *fwnode; 1099 int i, ret; 1100 1101 /* 1102 * iort_table and iort both point to the start of IORT table, but 1103 * have different struct types 1104 */ 1105 iort = (struct acpi_table_iort *)iort_table; 1106 1107 /* Get the first IORT node */ 1108 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1109 iort->node_offset); 1110 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1111 iort_table->length); 1112 1113 for (i = 0; i < iort->node_count; i++) { 1114 if (iort_node >= iort_end) { 1115 pr_err("iort node pointer overflows, bad table\n"); 1116 return; 1117 } 1118 1119 if ((iort_node->type == ACPI_IORT_NODE_SMMU) || 1120 (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { 1121 1122 fwnode = acpi_alloc_fwnode_static(); 1123 if (!fwnode) 1124 return; 1125 1126 iort_set_fwnode(iort_node, fwnode); 1127 1128 ret = iort_add_smmu_platform_device(iort_node); 1129 if (ret) { 1130 iort_delete_fwnode(iort_node); 1131 acpi_free_fwnode_static(fwnode); 1132 return; 1133 } 1134 } 1135 1136 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 1137 iort_node->length); 1138 } 1139 } 1140 1141 void __init acpi_iort_init(void) 1142 { 1143 acpi_status status; 1144 1145 status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); 1146 if (ACPI_FAILURE(status)) { 1147 if (status != AE_NOT_FOUND) { 1148 const char *msg = acpi_format_exception(status); 1149 1150 pr_err("Failed to get table, %s\n", msg); 1151 } 1152 1153 return; 1154 } 1155 1156 iort_init_platform_devices(); 1157 } 1158