1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2016, Semihalf 4 * Author: Tomasz Nowicki <tn@semihalf.com> 5 * 6 * This file implements early detection/parsing of I/O mapping 7 * reported to OS through firmware via I/O Remapping Table (IORT) 8 * IORT document number: ARM DEN 0049A 9 */ 10 11 #define pr_fmt(fmt) "ACPI: IORT: " fmt 12 13 #include <linux/acpi_iort.h> 14 #include <linux/iommu.h> 15 #include <linux/kernel.h> 16 #include <linux/list.h> 17 #include <linux/pci.h> 18 #include <linux/platform_device.h> 19 #include <linux/slab.h> 20 21 #define IORT_TYPE_MASK(type) (1 << (type)) 22 #define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP) 23 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ 24 (1 << ACPI_IORT_NODE_SMMU_V3)) 25 26 struct iort_its_msi_chip { 27 struct list_head list; 28 struct fwnode_handle *fw_node; 29 phys_addr_t base_addr; 30 u32 translation_id; 31 }; 32 33 struct iort_fwnode { 34 struct list_head list; 35 struct acpi_iort_node *iort_node; 36 struct fwnode_handle *fwnode; 37 }; 38 static LIST_HEAD(iort_fwnode_list); 39 static DEFINE_SPINLOCK(iort_fwnode_lock); 40 41 /** 42 * iort_set_fwnode() - Create iort_fwnode and use it to register 43 * iommu data in the iort_fwnode_list 44 * 45 * @node: IORT table node associated with the IOMMU 46 * @fwnode: fwnode associated with the IORT node 47 * 48 * Returns: 0 on success 49 * <0 on failure 50 */ 51 static inline int iort_set_fwnode(struct acpi_iort_node *iort_node, 52 struct fwnode_handle *fwnode) 53 { 54 struct iort_fwnode *np; 55 56 np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC); 57 58 if (WARN_ON(!np)) 59 return -ENOMEM; 60 61 INIT_LIST_HEAD(&np->list); 62 np->iort_node = iort_node; 63 np->fwnode = fwnode; 64 65 spin_lock(&iort_fwnode_lock); 66 list_add_tail(&np->list, &iort_fwnode_list); 67 spin_unlock(&iort_fwnode_lock); 68 69 return 0; 70 } 71 72 /** 73 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node 74 * 75 * @node: IORT table node to be looked-up 76 * 77 * Returns: fwnode_handle pointer on success, NULL on failure 78 */ 79 static inline struct fwnode_handle *iort_get_fwnode( 80 struct acpi_iort_node *node) 81 { 82 struct iort_fwnode *curr; 83 struct fwnode_handle *fwnode = NULL; 84 85 spin_lock(&iort_fwnode_lock); 86 list_for_each_entry(curr, &iort_fwnode_list, list) { 87 if (curr->iort_node == node) { 88 fwnode = curr->fwnode; 89 break; 90 } 91 } 92 spin_unlock(&iort_fwnode_lock); 93 94 return fwnode; 95 } 96 97 /** 98 * iort_delete_fwnode() - Delete fwnode associated with an IORT node 99 * 100 * @node: IORT table node associated with fwnode to delete 101 */ 102 static inline void iort_delete_fwnode(struct acpi_iort_node *node) 103 { 104 struct iort_fwnode *curr, *tmp; 105 106 spin_lock(&iort_fwnode_lock); 107 list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) { 108 if (curr->iort_node == node) { 109 list_del(&curr->list); 110 kfree(curr); 111 break; 112 } 113 } 114 spin_unlock(&iort_fwnode_lock); 115 } 116 117 /** 118 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode 119 * 120 * @fwnode: fwnode associated with device to be looked-up 121 * 122 * Returns: iort_node pointer on success, NULL on failure 123 */ 124 static inline struct acpi_iort_node *iort_get_iort_node( 125 struct fwnode_handle *fwnode) 126 { 127 struct iort_fwnode *curr; 128 struct acpi_iort_node *iort_node = NULL; 129 130 spin_lock(&iort_fwnode_lock); 131 list_for_each_entry(curr, &iort_fwnode_list, list) { 132 if (curr->fwnode == fwnode) { 133 iort_node = curr->iort_node; 134 break; 135 } 136 } 137 spin_unlock(&iort_fwnode_lock); 138 139 return iort_node; 140 } 141 142 typedef acpi_status (*iort_find_node_callback) 143 (struct acpi_iort_node *node, void *context); 144 145 /* Root pointer to the mapped IORT table */ 146 static struct acpi_table_header *iort_table; 147 148 static LIST_HEAD(iort_msi_chip_list); 149 static DEFINE_SPINLOCK(iort_msi_chip_lock); 150 151 /** 152 * iort_register_domain_token() - register domain token along with related 153 * ITS ID and base address to the list from where we can get it back later on. 154 * @trans_id: ITS ID. 155 * @base: ITS base address. 156 * @fw_node: Domain token. 157 * 158 * Returns: 0 on success, -ENOMEM if no memory when allocating list element 159 */ 160 int iort_register_domain_token(int trans_id, phys_addr_t base, 161 struct fwnode_handle *fw_node) 162 { 163 struct iort_its_msi_chip *its_msi_chip; 164 165 its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL); 166 if (!its_msi_chip) 167 return -ENOMEM; 168 169 its_msi_chip->fw_node = fw_node; 170 its_msi_chip->translation_id = trans_id; 171 its_msi_chip->base_addr = base; 172 173 spin_lock(&iort_msi_chip_lock); 174 list_add(&its_msi_chip->list, &iort_msi_chip_list); 175 spin_unlock(&iort_msi_chip_lock); 176 177 return 0; 178 } 179 180 /** 181 * iort_deregister_domain_token() - Deregister domain token based on ITS ID 182 * @trans_id: ITS ID. 183 * 184 * Returns: none. 185 */ 186 void iort_deregister_domain_token(int trans_id) 187 { 188 struct iort_its_msi_chip *its_msi_chip, *t; 189 190 spin_lock(&iort_msi_chip_lock); 191 list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) { 192 if (its_msi_chip->translation_id == trans_id) { 193 list_del(&its_msi_chip->list); 194 kfree(its_msi_chip); 195 break; 196 } 197 } 198 spin_unlock(&iort_msi_chip_lock); 199 } 200 201 /** 202 * iort_find_domain_token() - Find domain token based on given ITS ID 203 * @trans_id: ITS ID. 204 * 205 * Returns: domain token when find on the list, NULL otherwise 206 */ 207 struct fwnode_handle *iort_find_domain_token(int trans_id) 208 { 209 struct fwnode_handle *fw_node = NULL; 210 struct iort_its_msi_chip *its_msi_chip; 211 212 spin_lock(&iort_msi_chip_lock); 213 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) { 214 if (its_msi_chip->translation_id == trans_id) { 215 fw_node = its_msi_chip->fw_node; 216 break; 217 } 218 } 219 spin_unlock(&iort_msi_chip_lock); 220 221 return fw_node; 222 } 223 224 static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type, 225 iort_find_node_callback callback, 226 void *context) 227 { 228 struct acpi_iort_node *iort_node, *iort_end; 229 struct acpi_table_iort *iort; 230 int i; 231 232 if (!iort_table) 233 return NULL; 234 235 /* Get the first IORT node */ 236 iort = (struct acpi_table_iort *)iort_table; 237 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 238 iort->node_offset); 239 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 240 iort_table->length); 241 242 for (i = 0; i < iort->node_count; i++) { 243 if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, 244 "IORT node pointer overflows, bad table!\n")) 245 return NULL; 246 247 if (iort_node->type == type && 248 ACPI_SUCCESS(callback(iort_node, context))) 249 return iort_node; 250 251 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 252 iort_node->length); 253 } 254 255 return NULL; 256 } 257 258 static acpi_status iort_match_node_callback(struct acpi_iort_node *node, 259 void *context) 260 { 261 struct device *dev = context; 262 acpi_status status = AE_NOT_FOUND; 263 264 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { 265 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 266 struct acpi_device *adev = to_acpi_device_node(dev->fwnode); 267 struct acpi_iort_named_component *ncomp; 268 269 if (!adev) 270 goto out; 271 272 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); 273 if (ACPI_FAILURE(status)) { 274 dev_warn(dev, "Can't get device full path name\n"); 275 goto out; 276 } 277 278 ncomp = (struct acpi_iort_named_component *)node->node_data; 279 status = !strcmp(ncomp->device_name, buf.pointer) ? 280 AE_OK : AE_NOT_FOUND; 281 acpi_os_free(buf.pointer); 282 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 283 struct acpi_iort_root_complex *pci_rc; 284 struct pci_bus *bus; 285 286 bus = to_pci_bus(dev); 287 pci_rc = (struct acpi_iort_root_complex *)node->node_data; 288 289 /* 290 * It is assumed that PCI segment numbers maps one-to-one 291 * with root complexes. Each segment number can represent only 292 * one root complex. 293 */ 294 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? 295 AE_OK : AE_NOT_FOUND; 296 } 297 out: 298 return status; 299 } 300 301 static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, 302 u32 *rid_out) 303 { 304 /* Single mapping does not care for input id */ 305 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 306 if (type == ACPI_IORT_NODE_NAMED_COMPONENT || 307 type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 308 *rid_out = map->output_base; 309 return 0; 310 } 311 312 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n", 313 map, type); 314 return -ENXIO; 315 } 316 317 if (rid_in < map->input_base || 318 (rid_in >= map->input_base + map->id_count)) 319 return -ENXIO; 320 321 *rid_out = map->output_base + (rid_in - map->input_base); 322 return 0; 323 } 324 325 static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, 326 u32 *id_out, int index) 327 { 328 struct acpi_iort_node *parent; 329 struct acpi_iort_id_mapping *map; 330 331 if (!node->mapping_offset || !node->mapping_count || 332 index >= node->mapping_count) 333 return NULL; 334 335 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 336 node->mapping_offset + index * sizeof(*map)); 337 338 /* Firmware bug! */ 339 if (!map->output_reference) { 340 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 341 node, node->type); 342 return NULL; 343 } 344 345 parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 346 map->output_reference); 347 348 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { 349 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || 350 node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX || 351 node->type == ACPI_IORT_NODE_SMMU_V3 || 352 node->type == ACPI_IORT_NODE_PMCG) { 353 *id_out = map->output_base; 354 return parent; 355 } 356 } 357 358 return NULL; 359 } 360 361 static int iort_get_id_mapping_index(struct acpi_iort_node *node) 362 { 363 struct acpi_iort_smmu_v3 *smmu; 364 365 switch (node->type) { 366 case ACPI_IORT_NODE_SMMU_V3: 367 /* 368 * SMMUv3 dev ID mapping index was introduced in revision 1 369 * table, not available in revision 0 370 */ 371 if (node->revision < 1) 372 return -EINVAL; 373 374 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 375 /* 376 * ID mapping index is only ignored if all interrupts are 377 * GSIV based 378 */ 379 if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv 380 && smmu->sync_gsiv) 381 return -EINVAL; 382 383 if (smmu->id_mapping_index >= node->mapping_count) { 384 pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n", 385 node, node->type); 386 return -EINVAL; 387 } 388 389 return smmu->id_mapping_index; 390 case ACPI_IORT_NODE_PMCG: 391 return 0; 392 default: 393 return -EINVAL; 394 } 395 } 396 397 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, 398 u32 id_in, u32 *id_out, 399 u8 type_mask) 400 { 401 u32 id = id_in; 402 403 /* Parse the ID mapping tree to find specified node type */ 404 while (node) { 405 struct acpi_iort_id_mapping *map; 406 int i, index; 407 408 if (IORT_TYPE_MASK(node->type) & type_mask) { 409 if (id_out) 410 *id_out = id; 411 return node; 412 } 413 414 if (!node->mapping_offset || !node->mapping_count) 415 goto fail_map; 416 417 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 418 node->mapping_offset); 419 420 /* Firmware bug! */ 421 if (!map->output_reference) { 422 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", 423 node, node->type); 424 goto fail_map; 425 } 426 427 /* 428 * Get the special ID mapping index (if any) and skip its 429 * associated ID map to prevent erroneous multi-stage 430 * IORT ID translations. 431 */ 432 index = iort_get_id_mapping_index(node); 433 434 /* Do the ID translation */ 435 for (i = 0; i < node->mapping_count; i++, map++) { 436 /* if it is special mapping index, skip it */ 437 if (i == index) 438 continue; 439 440 if (!iort_id_map(map, node->type, id, &id)) 441 break; 442 } 443 444 if (i == node->mapping_count) 445 goto fail_map; 446 447 node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 448 map->output_reference); 449 } 450 451 fail_map: 452 /* Map input ID to output ID unchanged on mapping failure */ 453 if (id_out) 454 *id_out = id_in; 455 456 return NULL; 457 } 458 459 static struct acpi_iort_node *iort_node_map_platform_id( 460 struct acpi_iort_node *node, u32 *id_out, u8 type_mask, 461 int index) 462 { 463 struct acpi_iort_node *parent; 464 u32 id; 465 466 /* step 1: retrieve the initial dev id */ 467 parent = iort_node_get_id(node, &id, index); 468 if (!parent) 469 return NULL; 470 471 /* 472 * optional step 2: map the initial dev id if its parent is not 473 * the target type we want, map it again for the use cases such 474 * as NC (named component) -> SMMU -> ITS. If the type is matched, 475 * return the initial dev id and its parent pointer directly. 476 */ 477 if (!(IORT_TYPE_MASK(parent->type) & type_mask)) 478 parent = iort_node_map_id(parent, id, id_out, type_mask); 479 else 480 if (id_out) 481 *id_out = id; 482 483 return parent; 484 } 485 486 static struct acpi_iort_node *iort_find_dev_node(struct device *dev) 487 { 488 struct pci_bus *pbus; 489 490 if (!dev_is_pci(dev)) { 491 struct acpi_iort_node *node; 492 /* 493 * scan iort_fwnode_list to see if it's an iort platform 494 * device (such as SMMU, PMCG),its iort node already cached 495 * and associated with fwnode when iort platform devices 496 * were initialized. 497 */ 498 node = iort_get_iort_node(dev->fwnode); 499 if (node) 500 return node; 501 502 /* 503 * if not, then it should be a platform device defined in 504 * DSDT/SSDT (with Named Component node in IORT) 505 */ 506 return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 507 iort_match_node_callback, dev); 508 } 509 510 /* Find a PCI root bus */ 511 pbus = to_pci_dev(dev)->bus; 512 while (!pci_is_root_bus(pbus)) 513 pbus = pbus->parent; 514 515 return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 516 iort_match_node_callback, &pbus->dev); 517 } 518 519 /** 520 * iort_msi_map_rid() - Map a MSI requester ID for a device 521 * @dev: The device for which the mapping is to be done. 522 * @req_id: The device requester ID. 523 * 524 * Returns: mapped MSI RID on success, input requester ID otherwise 525 */ 526 u32 iort_msi_map_rid(struct device *dev, u32 req_id) 527 { 528 struct acpi_iort_node *node; 529 u32 dev_id; 530 531 node = iort_find_dev_node(dev); 532 if (!node) 533 return req_id; 534 535 iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); 536 return dev_id; 537 } 538 539 /** 540 * iort_pmsi_get_dev_id() - Get the device id for a device 541 * @dev: The device for which the mapping is to be done. 542 * @dev_id: The device ID found. 543 * 544 * Returns: 0 for successful find a dev id, -ENODEV on error 545 */ 546 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) 547 { 548 int i, index; 549 struct acpi_iort_node *node; 550 551 node = iort_find_dev_node(dev); 552 if (!node) 553 return -ENODEV; 554 555 index = iort_get_id_mapping_index(node); 556 /* if there is a valid index, go get the dev_id directly */ 557 if (index >= 0) { 558 if (iort_node_get_id(node, dev_id, index)) 559 return 0; 560 } else { 561 for (i = 0; i < node->mapping_count; i++) { 562 if (iort_node_map_platform_id(node, dev_id, 563 IORT_MSI_TYPE, i)) 564 return 0; 565 } 566 } 567 568 return -ENODEV; 569 } 570 571 static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base) 572 { 573 struct iort_its_msi_chip *its_msi_chip; 574 int ret = -ENODEV; 575 576 spin_lock(&iort_msi_chip_lock); 577 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) { 578 if (its_msi_chip->translation_id == its_id) { 579 *base = its_msi_chip->base_addr; 580 ret = 0; 581 break; 582 } 583 } 584 spin_unlock(&iort_msi_chip_lock); 585 586 return ret; 587 } 588 589 /** 590 * iort_dev_find_its_id() - Find the ITS identifier for a device 591 * @dev: The device. 592 * @req_id: Device's requester ID 593 * @idx: Index of the ITS identifier list. 594 * @its_id: ITS identifier. 595 * 596 * Returns: 0 on success, appropriate error value otherwise 597 */ 598 static int iort_dev_find_its_id(struct device *dev, u32 req_id, 599 unsigned int idx, int *its_id) 600 { 601 struct acpi_iort_its_group *its; 602 struct acpi_iort_node *node; 603 604 node = iort_find_dev_node(dev); 605 if (!node) 606 return -ENXIO; 607 608 node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); 609 if (!node) 610 return -ENXIO; 611 612 /* Move to ITS specific data */ 613 its = (struct acpi_iort_its_group *)node->node_data; 614 if (idx >= its->its_count) { 615 dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n", 616 idx, its->its_count); 617 return -ENXIO; 618 } 619 620 *its_id = its->identifiers[idx]; 621 return 0; 622 } 623 624 /** 625 * iort_get_device_domain() - Find MSI domain related to a device 626 * @dev: The device. 627 * @req_id: Requester ID for the device. 628 * 629 * Returns: the MSI domain for this device, NULL otherwise 630 */ 631 struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) 632 { 633 struct fwnode_handle *handle; 634 int its_id; 635 636 if (iort_dev_find_its_id(dev, req_id, 0, &its_id)) 637 return NULL; 638 639 handle = iort_find_domain_token(its_id); 640 if (!handle) 641 return NULL; 642 643 return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); 644 } 645 646 static void iort_set_device_domain(struct device *dev, 647 struct acpi_iort_node *node) 648 { 649 struct acpi_iort_its_group *its; 650 struct acpi_iort_node *msi_parent; 651 struct acpi_iort_id_mapping *map; 652 struct fwnode_handle *iort_fwnode; 653 struct irq_domain *domain; 654 int index; 655 656 index = iort_get_id_mapping_index(node); 657 if (index < 0) 658 return; 659 660 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, 661 node->mapping_offset + index * sizeof(*map)); 662 663 /* Firmware bug! */ 664 if (!map->output_reference || 665 !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) { 666 pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n", 667 node, node->type); 668 return; 669 } 670 671 msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, 672 map->output_reference); 673 674 if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP) 675 return; 676 677 /* Move to ITS specific data */ 678 its = (struct acpi_iort_its_group *)msi_parent->node_data; 679 680 iort_fwnode = iort_find_domain_token(its->identifiers[0]); 681 if (!iort_fwnode) 682 return; 683 684 domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); 685 if (domain) 686 dev_set_msi_domain(dev, domain); 687 } 688 689 /** 690 * iort_get_platform_device_domain() - Find MSI domain related to a 691 * platform device 692 * @dev: the dev pointer associated with the platform device 693 * 694 * Returns: the MSI domain for this device, NULL otherwise 695 */ 696 static struct irq_domain *iort_get_platform_device_domain(struct device *dev) 697 { 698 struct acpi_iort_node *node, *msi_parent = NULL; 699 struct fwnode_handle *iort_fwnode; 700 struct acpi_iort_its_group *its; 701 int i; 702 703 /* find its associated iort node */ 704 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 705 iort_match_node_callback, dev); 706 if (!node) 707 return NULL; 708 709 /* then find its msi parent node */ 710 for (i = 0; i < node->mapping_count; i++) { 711 msi_parent = iort_node_map_platform_id(node, NULL, 712 IORT_MSI_TYPE, i); 713 if (msi_parent) 714 break; 715 } 716 717 if (!msi_parent) 718 return NULL; 719 720 /* Move to ITS specific data */ 721 its = (struct acpi_iort_its_group *)msi_parent->node_data; 722 723 iort_fwnode = iort_find_domain_token(its->identifiers[0]); 724 if (!iort_fwnode) 725 return NULL; 726 727 return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); 728 } 729 730 void acpi_configure_pmsi_domain(struct device *dev) 731 { 732 struct irq_domain *msi_domain; 733 734 msi_domain = iort_get_platform_device_domain(dev); 735 if (msi_domain) 736 dev_set_msi_domain(dev, msi_domain); 737 } 738 739 static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias, 740 void *data) 741 { 742 u32 *rid = data; 743 744 *rid = alias; 745 return 0; 746 } 747 748 #ifdef CONFIG_IOMMU_API 749 static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev) 750 { 751 struct acpi_iort_node *iommu; 752 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 753 754 iommu = iort_get_iort_node(fwspec->iommu_fwnode); 755 756 if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) { 757 struct acpi_iort_smmu_v3 *smmu; 758 759 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; 760 if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) 761 return iommu; 762 } 763 764 return NULL; 765 } 766 767 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) 768 { 769 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 770 771 return (fwspec && fwspec->ops) ? fwspec->ops : NULL; 772 } 773 774 static inline int iort_add_device_replay(const struct iommu_ops *ops, 775 struct device *dev) 776 { 777 int err = 0; 778 779 if (dev->bus && !device_iommu_mapped(dev)) 780 err = iommu_probe_device(dev); 781 782 return err; 783 } 784 785 /** 786 * iort_iommu_msi_get_resv_regions - Reserved region driver helper 787 * @dev: Device from iommu_get_resv_regions() 788 * @head: Reserved region list from iommu_get_resv_regions() 789 * 790 * Returns: Number of msi reserved regions on success (0 if platform 791 * doesn't require the reservation or no associated msi regions), 792 * appropriate error value otherwise. The ITS interrupt translation 793 * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device 794 * are the msi reserved regions. 795 */ 796 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) 797 { 798 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 799 struct acpi_iort_its_group *its; 800 struct acpi_iort_node *iommu_node, *its_node = NULL; 801 int i, resv = 0; 802 803 iommu_node = iort_get_msi_resv_iommu(dev); 804 if (!iommu_node) 805 return 0; 806 807 /* 808 * Current logic to reserve ITS regions relies on HW topologies 809 * where a given PCI or named component maps its IDs to only one 810 * ITS group; if a PCI or named component can map its IDs to 811 * different ITS groups through IORT mappings this function has 812 * to be reworked to ensure we reserve regions for all ITS groups 813 * a given PCI or named component may map IDs to. 814 */ 815 816 for (i = 0; i < fwspec->num_ids; i++) { 817 its_node = iort_node_map_id(iommu_node, 818 fwspec->ids[i], 819 NULL, IORT_MSI_TYPE); 820 if (its_node) 821 break; 822 } 823 824 if (!its_node) 825 return 0; 826 827 /* Move to ITS specific data */ 828 its = (struct acpi_iort_its_group *)its_node->node_data; 829 830 for (i = 0; i < its->its_count; i++) { 831 phys_addr_t base; 832 833 if (!iort_find_its_base(its->identifiers[i], &base)) { 834 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 835 struct iommu_resv_region *region; 836 837 region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K, 838 prot, IOMMU_RESV_MSI); 839 if (region) { 840 list_add_tail(®ion->list, head); 841 resv++; 842 } 843 } 844 } 845 846 return (resv == its->its_count) ? resv : -ENODEV; 847 } 848 849 static inline bool iort_iommu_driver_enabled(u8 type) 850 { 851 switch (type) { 852 case ACPI_IORT_NODE_SMMU_V3: 853 return IS_BUILTIN(CONFIG_ARM_SMMU_V3); 854 case ACPI_IORT_NODE_SMMU: 855 return IS_BUILTIN(CONFIG_ARM_SMMU); 856 default: 857 pr_warn("IORT node type %u does not describe an SMMU\n", type); 858 return false; 859 } 860 } 861 862 static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, 863 struct fwnode_handle *fwnode, 864 const struct iommu_ops *ops) 865 { 866 int ret = iommu_fwspec_init(dev, fwnode, ops); 867 868 if (!ret) 869 ret = iommu_fwspec_add_ids(dev, &streamid, 1); 870 871 return ret; 872 } 873 874 static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node) 875 { 876 struct acpi_iort_root_complex *pci_rc; 877 878 pci_rc = (struct acpi_iort_root_complex *)node->node_data; 879 return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED; 880 } 881 882 static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, 883 u32 streamid) 884 { 885 const struct iommu_ops *ops; 886 struct fwnode_handle *iort_fwnode; 887 888 if (!node) 889 return -ENODEV; 890 891 iort_fwnode = iort_get_fwnode(node); 892 if (!iort_fwnode) 893 return -ENODEV; 894 895 /* 896 * If the ops look-up fails, this means that either 897 * the SMMU drivers have not been probed yet or that 898 * the SMMU drivers are not built in the kernel; 899 * Depending on whether the SMMU drivers are built-in 900 * in the kernel or not, defer the IOMMU configuration 901 * or just abort it. 902 */ 903 ops = iommu_ops_from_fwnode(iort_fwnode); 904 if (!ops) 905 return iort_iommu_driver_enabled(node->type) ? 906 -EPROBE_DEFER : -ENODEV; 907 908 return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); 909 } 910 911 struct iort_pci_alias_info { 912 struct device *dev; 913 struct acpi_iort_node *node; 914 }; 915 916 static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) 917 { 918 struct iort_pci_alias_info *info = data; 919 struct acpi_iort_node *parent; 920 u32 streamid; 921 922 parent = iort_node_map_id(info->node, alias, &streamid, 923 IORT_IOMMU_TYPE); 924 return iort_iommu_xlate(info->dev, parent, streamid); 925 } 926 927 /** 928 * iort_iommu_configure - Set-up IOMMU configuration for a device. 929 * 930 * @dev: device to configure 931 * 932 * Returns: iommu_ops pointer on configuration success 933 * NULL on configuration failure 934 */ 935 const struct iommu_ops *iort_iommu_configure(struct device *dev) 936 { 937 struct acpi_iort_node *node, *parent; 938 const struct iommu_ops *ops; 939 u32 streamid = 0; 940 int err = -ENODEV; 941 942 /* 943 * If we already translated the fwspec there 944 * is nothing left to do, return the iommu_ops. 945 */ 946 ops = iort_fwspec_iommu_ops(dev); 947 if (ops) 948 return ops; 949 950 if (dev_is_pci(dev)) { 951 struct pci_bus *bus = to_pci_dev(dev)->bus; 952 struct iort_pci_alias_info info = { .dev = dev }; 953 954 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 955 iort_match_node_callback, &bus->dev); 956 if (!node) 957 return NULL; 958 959 info.node = node; 960 err = pci_for_each_dma_alias(to_pci_dev(dev), 961 iort_pci_iommu_init, &info); 962 963 if (!err && iort_pci_rc_supports_ats(node)) 964 dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; 965 } else { 966 int i = 0; 967 968 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 969 iort_match_node_callback, dev); 970 if (!node) 971 return NULL; 972 973 do { 974 parent = iort_node_map_platform_id(node, &streamid, 975 IORT_IOMMU_TYPE, 976 i++); 977 978 if (parent) 979 err = iort_iommu_xlate(dev, parent, streamid); 980 } while (parent && !err); 981 } 982 983 /* 984 * If we have reason to believe the IOMMU driver missed the initial 985 * add_device callback for dev, replay it to get things in order. 986 */ 987 if (!err) { 988 ops = iort_fwspec_iommu_ops(dev); 989 err = iort_add_device_replay(ops, dev); 990 } 991 992 /* Ignore all other errors apart from EPROBE_DEFER */ 993 if (err == -EPROBE_DEFER) { 994 ops = ERR_PTR(err); 995 } else if (err) { 996 dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); 997 ops = NULL; 998 } 999 1000 return ops; 1001 } 1002 #else 1003 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev) 1004 { return NULL; } 1005 static inline int iort_add_device_replay(const struct iommu_ops *ops, 1006 struct device *dev) 1007 { return 0; } 1008 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) 1009 { return 0; } 1010 const struct iommu_ops *iort_iommu_configure(struct device *dev) 1011 { return NULL; } 1012 #endif 1013 1014 static int nc_dma_get_range(struct device *dev, u64 *size) 1015 { 1016 struct acpi_iort_node *node; 1017 struct acpi_iort_named_component *ncomp; 1018 1019 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, 1020 iort_match_node_callback, dev); 1021 if (!node) 1022 return -ENODEV; 1023 1024 ncomp = (struct acpi_iort_named_component *)node->node_data; 1025 1026 *size = ncomp->memory_address_limit >= 64 ? U64_MAX : 1027 1ULL<<ncomp->memory_address_limit; 1028 1029 return 0; 1030 } 1031 1032 static int rc_dma_get_range(struct device *dev, u64 *size) 1033 { 1034 struct acpi_iort_node *node; 1035 struct acpi_iort_root_complex *rc; 1036 struct pci_bus *pbus = to_pci_dev(dev)->bus; 1037 1038 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 1039 iort_match_node_callback, &pbus->dev); 1040 if (!node || node->revision < 1) 1041 return -ENODEV; 1042 1043 rc = (struct acpi_iort_root_complex *)node->node_data; 1044 1045 *size = rc->memory_address_limit >= 64 ? U64_MAX : 1046 1ULL<<rc->memory_address_limit; 1047 1048 return 0; 1049 } 1050 1051 /** 1052 * iort_dma_setup() - Set-up device DMA parameters. 1053 * 1054 * @dev: device to configure 1055 * @dma_addr: device DMA address result pointer 1056 * @size: DMA range size result pointer 1057 */ 1058 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) 1059 { 1060 u64 end, mask, dmaaddr = 0, size = 0, offset = 0; 1061 int ret; 1062 1063 /* 1064 * If @dev is expected to be DMA-capable then the bus code that created 1065 * it should have initialised its dma_mask pointer by this point. For 1066 * now, we'll continue the legacy behaviour of coercing it to the 1067 * coherent mask if not, but we'll no longer do so quietly. 1068 */ 1069 if (!dev->dma_mask) { 1070 dev_warn(dev, "DMA mask not set\n"); 1071 dev->dma_mask = &dev->coherent_dma_mask; 1072 } 1073 1074 if (dev->coherent_dma_mask) 1075 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); 1076 else 1077 size = 1ULL << 32; 1078 1079 if (dev_is_pci(dev)) { 1080 ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size); 1081 if (ret == -ENODEV) 1082 ret = rc_dma_get_range(dev, &size); 1083 } else { 1084 ret = nc_dma_get_range(dev, &size); 1085 } 1086 1087 if (!ret) { 1088 /* 1089 * Limit coherent and dma mask based on size retrieved from 1090 * firmware. 1091 */ 1092 end = dmaaddr + size - 1; 1093 mask = DMA_BIT_MASK(ilog2(end) + 1); 1094 dev->bus_dma_limit = end; 1095 dev->coherent_dma_mask = mask; 1096 *dev->dma_mask = mask; 1097 } 1098 1099 *dma_addr = dmaaddr; 1100 *dma_size = size; 1101 1102 dev->dma_pfn_offset = PFN_DOWN(offset); 1103 dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset); 1104 } 1105 1106 static void __init acpi_iort_register_irq(int hwirq, const char *name, 1107 int trigger, 1108 struct resource *res) 1109 { 1110 int irq = acpi_register_gsi(NULL, hwirq, trigger, 1111 ACPI_ACTIVE_HIGH); 1112 1113 if (irq <= 0) { 1114 pr_err("could not register gsi hwirq %d name [%s]\n", hwirq, 1115 name); 1116 return; 1117 } 1118 1119 res->start = irq; 1120 res->end = irq; 1121 res->flags = IORESOURCE_IRQ; 1122 res->name = name; 1123 } 1124 1125 static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) 1126 { 1127 struct acpi_iort_smmu_v3 *smmu; 1128 /* Always present mem resource */ 1129 int num_res = 1; 1130 1131 /* Retrieve SMMUv3 specific data */ 1132 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1133 1134 if (smmu->event_gsiv) 1135 num_res++; 1136 1137 if (smmu->pri_gsiv) 1138 num_res++; 1139 1140 if (smmu->gerr_gsiv) 1141 num_res++; 1142 1143 if (smmu->sync_gsiv) 1144 num_res++; 1145 1146 return num_res; 1147 } 1148 1149 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) 1150 { 1151 /* 1152 * Cavium ThunderX2 implementation doesn't not support unique 1153 * irq line. Use single irq line for all the SMMUv3 interrupts. 1154 */ 1155 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) 1156 return false; 1157 1158 /* 1159 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking 1160 * SPI numbers here. 1161 */ 1162 return smmu->event_gsiv == smmu->pri_gsiv && 1163 smmu->event_gsiv == smmu->gerr_gsiv && 1164 smmu->event_gsiv == smmu->sync_gsiv; 1165 } 1166 1167 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) 1168 { 1169 /* 1170 * Override the size, for Cavium ThunderX2 implementation 1171 * which doesn't support the page 1 SMMU register space. 1172 */ 1173 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) 1174 return SZ_64K; 1175 1176 return SZ_128K; 1177 } 1178 1179 static void __init arm_smmu_v3_init_resources(struct resource *res, 1180 struct acpi_iort_node *node) 1181 { 1182 struct acpi_iort_smmu_v3 *smmu; 1183 int num_res = 0; 1184 1185 /* Retrieve SMMUv3 specific data */ 1186 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1187 1188 res[num_res].start = smmu->base_address; 1189 res[num_res].end = smmu->base_address + 1190 arm_smmu_v3_resource_size(smmu) - 1; 1191 res[num_res].flags = IORESOURCE_MEM; 1192 1193 num_res++; 1194 if (arm_smmu_v3_is_combined_irq(smmu)) { 1195 if (smmu->event_gsiv) 1196 acpi_iort_register_irq(smmu->event_gsiv, "combined", 1197 ACPI_EDGE_SENSITIVE, 1198 &res[num_res++]); 1199 } else { 1200 1201 if (smmu->event_gsiv) 1202 acpi_iort_register_irq(smmu->event_gsiv, "eventq", 1203 ACPI_EDGE_SENSITIVE, 1204 &res[num_res++]); 1205 1206 if (smmu->pri_gsiv) 1207 acpi_iort_register_irq(smmu->pri_gsiv, "priq", 1208 ACPI_EDGE_SENSITIVE, 1209 &res[num_res++]); 1210 1211 if (smmu->gerr_gsiv) 1212 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", 1213 ACPI_EDGE_SENSITIVE, 1214 &res[num_res++]); 1215 1216 if (smmu->sync_gsiv) 1217 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", 1218 ACPI_EDGE_SENSITIVE, 1219 &res[num_res++]); 1220 } 1221 } 1222 1223 static void __init arm_smmu_v3_dma_configure(struct device *dev, 1224 struct acpi_iort_node *node) 1225 { 1226 struct acpi_iort_smmu_v3 *smmu; 1227 enum dev_dma_attr attr; 1228 1229 /* Retrieve SMMUv3 specific data */ 1230 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1231 1232 attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? 1233 DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; 1234 1235 /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */ 1236 dev->dma_mask = &dev->coherent_dma_mask; 1237 1238 /* Configure DMA for the page table walker */ 1239 acpi_dma_configure(dev, attr); 1240 } 1241 1242 #if defined(CONFIG_ACPI_NUMA) 1243 /* 1244 * set numa proximity domain for smmuv3 device 1245 */ 1246 static int __init arm_smmu_v3_set_proximity(struct device *dev, 1247 struct acpi_iort_node *node) 1248 { 1249 struct acpi_iort_smmu_v3 *smmu; 1250 1251 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1252 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { 1253 int dev_node = acpi_map_pxm_to_node(smmu->pxm); 1254 1255 if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) 1256 return -EINVAL; 1257 1258 set_dev_node(dev, dev_node); 1259 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", 1260 smmu->base_address, 1261 smmu->pxm); 1262 } 1263 return 0; 1264 } 1265 #else 1266 #define arm_smmu_v3_set_proximity NULL 1267 #endif 1268 1269 static int __init arm_smmu_count_resources(struct acpi_iort_node *node) 1270 { 1271 struct acpi_iort_smmu *smmu; 1272 1273 /* Retrieve SMMU specific data */ 1274 smmu = (struct acpi_iort_smmu *)node->node_data; 1275 1276 /* 1277 * Only consider the global fault interrupt and ignore the 1278 * configuration access interrupt. 1279 * 1280 * MMIO address and global fault interrupt resources are always 1281 * present so add them to the context interrupt count as a static 1282 * value. 1283 */ 1284 return smmu->context_interrupt_count + 2; 1285 } 1286 1287 static void __init arm_smmu_init_resources(struct resource *res, 1288 struct acpi_iort_node *node) 1289 { 1290 struct acpi_iort_smmu *smmu; 1291 int i, hw_irq, trigger, num_res = 0; 1292 u64 *ctx_irq, *glb_irq; 1293 1294 /* Retrieve SMMU specific data */ 1295 smmu = (struct acpi_iort_smmu *)node->node_data; 1296 1297 res[num_res].start = smmu->base_address; 1298 res[num_res].end = smmu->base_address + smmu->span - 1; 1299 res[num_res].flags = IORESOURCE_MEM; 1300 num_res++; 1301 1302 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); 1303 /* Global IRQs */ 1304 hw_irq = IORT_IRQ_MASK(glb_irq[0]); 1305 trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]); 1306 1307 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, 1308 &res[num_res++]); 1309 1310 /* Context IRQs */ 1311 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); 1312 for (i = 0; i < smmu->context_interrupt_count; i++) { 1313 hw_irq = IORT_IRQ_MASK(ctx_irq[i]); 1314 trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]); 1315 1316 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, 1317 &res[num_res++]); 1318 } 1319 } 1320 1321 static void __init arm_smmu_dma_configure(struct device *dev, 1322 struct acpi_iort_node *node) 1323 { 1324 struct acpi_iort_smmu *smmu; 1325 enum dev_dma_attr attr; 1326 1327 /* Retrieve SMMU specific data */ 1328 smmu = (struct acpi_iort_smmu *)node->node_data; 1329 1330 attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? 1331 DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT; 1332 1333 /* We expect the dma masks to be equivalent for SMMU set-ups */ 1334 dev->dma_mask = &dev->coherent_dma_mask; 1335 1336 /* Configure DMA for the page table walker */ 1337 acpi_dma_configure(dev, attr); 1338 } 1339 1340 static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node) 1341 { 1342 struct acpi_iort_pmcg *pmcg; 1343 1344 /* Retrieve PMCG specific data */ 1345 pmcg = (struct acpi_iort_pmcg *)node->node_data; 1346 1347 /* 1348 * There are always 2 memory resources. 1349 * If the overflow_gsiv is present then add that for a total of 3. 1350 */ 1351 return pmcg->overflow_gsiv ? 3 : 2; 1352 } 1353 1354 static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, 1355 struct acpi_iort_node *node) 1356 { 1357 struct acpi_iort_pmcg *pmcg; 1358 1359 /* Retrieve PMCG specific data */ 1360 pmcg = (struct acpi_iort_pmcg *)node->node_data; 1361 1362 res[0].start = pmcg->page0_base_address; 1363 res[0].end = pmcg->page0_base_address + SZ_4K - 1; 1364 res[0].flags = IORESOURCE_MEM; 1365 res[1].start = pmcg->page1_base_address; 1366 res[1].end = pmcg->page1_base_address + SZ_4K - 1; 1367 res[1].flags = IORESOURCE_MEM; 1368 1369 if (pmcg->overflow_gsiv) 1370 acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", 1371 ACPI_EDGE_SENSITIVE, &res[2]); 1372 } 1373 1374 static struct acpi_platform_list pmcg_plat_info[] __initdata = { 1375 /* HiSilicon Hip08 Platform */ 1376 {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal, 1377 "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08}, 1378 { } 1379 }; 1380 1381 static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev) 1382 { 1383 u32 model; 1384 int idx; 1385 1386 idx = acpi_match_platform_list(pmcg_plat_info); 1387 if (idx >= 0) 1388 model = pmcg_plat_info[idx].data; 1389 else 1390 model = IORT_SMMU_V3_PMCG_GENERIC; 1391 1392 return platform_device_add_data(pdev, &model, sizeof(model)); 1393 } 1394 1395 struct iort_dev_config { 1396 const char *name; 1397 int (*dev_init)(struct acpi_iort_node *node); 1398 void (*dev_dma_configure)(struct device *dev, 1399 struct acpi_iort_node *node); 1400 int (*dev_count_resources)(struct acpi_iort_node *node); 1401 void (*dev_init_resources)(struct resource *res, 1402 struct acpi_iort_node *node); 1403 int (*dev_set_proximity)(struct device *dev, 1404 struct acpi_iort_node *node); 1405 int (*dev_add_platdata)(struct platform_device *pdev); 1406 }; 1407 1408 static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = { 1409 .name = "arm-smmu-v3", 1410 .dev_dma_configure = arm_smmu_v3_dma_configure, 1411 .dev_count_resources = arm_smmu_v3_count_resources, 1412 .dev_init_resources = arm_smmu_v3_init_resources, 1413 .dev_set_proximity = arm_smmu_v3_set_proximity, 1414 }; 1415 1416 static const struct iort_dev_config iort_arm_smmu_cfg __initconst = { 1417 .name = "arm-smmu", 1418 .dev_dma_configure = arm_smmu_dma_configure, 1419 .dev_count_resources = arm_smmu_count_resources, 1420 .dev_init_resources = arm_smmu_init_resources, 1421 }; 1422 1423 static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = { 1424 .name = "arm-smmu-v3-pmcg", 1425 .dev_count_resources = arm_smmu_v3_pmcg_count_resources, 1426 .dev_init_resources = arm_smmu_v3_pmcg_init_resources, 1427 .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata, 1428 }; 1429 1430 static __init const struct iort_dev_config *iort_get_dev_cfg( 1431 struct acpi_iort_node *node) 1432 { 1433 switch (node->type) { 1434 case ACPI_IORT_NODE_SMMU_V3: 1435 return &iort_arm_smmu_v3_cfg; 1436 case ACPI_IORT_NODE_SMMU: 1437 return &iort_arm_smmu_cfg; 1438 case ACPI_IORT_NODE_PMCG: 1439 return &iort_arm_smmu_v3_pmcg_cfg; 1440 default: 1441 return NULL; 1442 } 1443 } 1444 1445 /** 1446 * iort_add_platform_device() - Allocate a platform device for IORT node 1447 * @node: Pointer to device ACPI IORT node 1448 * 1449 * Returns: 0 on success, <0 failure 1450 */ 1451 static int __init iort_add_platform_device(struct acpi_iort_node *node, 1452 const struct iort_dev_config *ops) 1453 { 1454 struct fwnode_handle *fwnode; 1455 struct platform_device *pdev; 1456 struct resource *r; 1457 int ret, count; 1458 1459 pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); 1460 if (!pdev) 1461 return -ENOMEM; 1462 1463 if (ops->dev_set_proximity) { 1464 ret = ops->dev_set_proximity(&pdev->dev, node); 1465 if (ret) 1466 goto dev_put; 1467 } 1468 1469 count = ops->dev_count_resources(node); 1470 1471 r = kcalloc(count, sizeof(*r), GFP_KERNEL); 1472 if (!r) { 1473 ret = -ENOMEM; 1474 goto dev_put; 1475 } 1476 1477 ops->dev_init_resources(r, node); 1478 1479 ret = platform_device_add_resources(pdev, r, count); 1480 /* 1481 * Resources are duplicated in platform_device_add_resources, 1482 * free their allocated memory 1483 */ 1484 kfree(r); 1485 1486 if (ret) 1487 goto dev_put; 1488 1489 /* 1490 * Platform devices based on PMCG nodes uses platform_data to 1491 * pass the hardware model info to the driver. For others, add 1492 * a copy of IORT node pointer to platform_data to be used to 1493 * retrieve IORT data information. 1494 */ 1495 if (ops->dev_add_platdata) 1496 ret = ops->dev_add_platdata(pdev); 1497 else 1498 ret = platform_device_add_data(pdev, &node, sizeof(node)); 1499 1500 if (ret) 1501 goto dev_put; 1502 1503 fwnode = iort_get_fwnode(node); 1504 1505 if (!fwnode) { 1506 ret = -ENODEV; 1507 goto dev_put; 1508 } 1509 1510 pdev->dev.fwnode = fwnode; 1511 1512 if (ops->dev_dma_configure) 1513 ops->dev_dma_configure(&pdev->dev, node); 1514 1515 iort_set_device_domain(&pdev->dev, node); 1516 1517 ret = platform_device_add(pdev); 1518 if (ret) 1519 goto dma_deconfigure; 1520 1521 return 0; 1522 1523 dma_deconfigure: 1524 arch_teardown_dma_ops(&pdev->dev); 1525 dev_put: 1526 platform_device_put(pdev); 1527 1528 return ret; 1529 } 1530 1531 #ifdef CONFIG_PCI 1532 static void __init iort_enable_acs(struct acpi_iort_node *iort_node) 1533 { 1534 static bool acs_enabled __initdata; 1535 1536 if (acs_enabled) 1537 return; 1538 1539 if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { 1540 struct acpi_iort_node *parent; 1541 struct acpi_iort_id_mapping *map; 1542 int i; 1543 1544 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, 1545 iort_node->mapping_offset); 1546 1547 for (i = 0; i < iort_node->mapping_count; i++, map++) { 1548 if (!map->output_reference) 1549 continue; 1550 1551 parent = ACPI_ADD_PTR(struct acpi_iort_node, 1552 iort_table, map->output_reference); 1553 /* 1554 * If we detect a RC->SMMU mapping, make sure 1555 * we enable ACS on the system. 1556 */ 1557 if ((parent->type == ACPI_IORT_NODE_SMMU) || 1558 (parent->type == ACPI_IORT_NODE_SMMU_V3)) { 1559 pci_request_acs(); 1560 acs_enabled = true; 1561 return; 1562 } 1563 } 1564 } 1565 } 1566 #else 1567 static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { } 1568 #endif 1569 1570 static void __init iort_init_platform_devices(void) 1571 { 1572 struct acpi_iort_node *iort_node, *iort_end; 1573 struct acpi_table_iort *iort; 1574 struct fwnode_handle *fwnode; 1575 int i, ret; 1576 const struct iort_dev_config *ops; 1577 1578 /* 1579 * iort_table and iort both point to the start of IORT table, but 1580 * have different struct types 1581 */ 1582 iort = (struct acpi_table_iort *)iort_table; 1583 1584 /* Get the first IORT node */ 1585 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1586 iort->node_offset); 1587 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort, 1588 iort_table->length); 1589 1590 for (i = 0; i < iort->node_count; i++) { 1591 if (iort_node >= iort_end) { 1592 pr_err("iort node pointer overflows, bad table\n"); 1593 return; 1594 } 1595 1596 iort_enable_acs(iort_node); 1597 1598 ops = iort_get_dev_cfg(iort_node); 1599 if (ops) { 1600 fwnode = acpi_alloc_fwnode_static(); 1601 if (!fwnode) 1602 return; 1603 1604 iort_set_fwnode(iort_node, fwnode); 1605 1606 ret = iort_add_platform_device(iort_node, ops); 1607 if (ret) { 1608 iort_delete_fwnode(iort_node); 1609 acpi_free_fwnode_static(fwnode); 1610 return; 1611 } 1612 } 1613 1614 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, 1615 iort_node->length); 1616 } 1617 } 1618 1619 void __init acpi_iort_init(void) 1620 { 1621 acpi_status status; 1622 1623 status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); 1624 if (ACPI_FAILURE(status)) { 1625 if (status != AE_NOT_FOUND) { 1626 const char *msg = acpi_format_exception(status); 1627 1628 pr_err("Failed to get table, %s\n", msg); 1629 } 1630 1631 return; 1632 } 1633 1634 iort_init_platform_devices(); 1635 } 1636