/*
 * Copyright (C) 2016, Semihalf
 * Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of the I/O mapping
 * reported to the OS by firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address in a list so they can be retrieved later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
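 *
 * Illustration (with a hypothetical translation ID): a token registered
 * via iort_register_domain_token(4, base, fwnode) is what a later call to
 * iort_find_domain_token(4) hands back.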
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * to root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* A single mapping does not care about the input ID */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	    index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * The SMMUv3 dev ID mapping index was introduced in the
		 * revision 1 table; it is not available in revision 0.
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * The ID mapping index is ignored only if all interrupts
		 * are GSIV-based.
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	default:
		return -EINVAL;
	}
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find the specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
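		 *
		 * Illustration (hypothetical firmware layout): an SMMUv3
		 * node may reserve one mapping entry for its own MSI
		 * device ID; feeding a translated ID through that entry
		 * as well would yield a bogus two-stage translation,
		 * hence the skip below. Range entries translate linearly,
		 * e.g. input_base 0x1000, id_count 0xff, output_base
		 * 0x2000 maps an incoming ID of 0x1042 to 0x2042 (see
		 * iort_id_map() above).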
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is the special mapping index, skip it */
			if (i == index)
				continue;

			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev ID */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the parent is not of the target type we
	 * want, map the initial dev ID again, for use cases such as
	 * NC (named component) -> SMMU -> ITS. If the type matches,
	 * return the initial dev ID and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * Scan iort_fwnode_list to see if the device is an IORT
		 * platform device (such as an SMMU or PMCG); its IORT
		 * node was already cached and associated with the fwnode
		 * when the IORT platform devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;

		/*
		 * If not, then it should be a platform device defined in
		 * DSDT/SSDT (with a Named Component node in the IORT).
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device ID for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
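 *
 * The ID returned here is what a platform MSI caller is expected to
 * program into the ITS as the device identifier (an assumption about the
 * caller; this helper only performs the IORT look-up).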
 *
 * Returns: 0 if a device ID was found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID.
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug!
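	 * A sane MSI mapping here is a SINGLE_MAPPING entry with a
	 * non-NULL parent reference: the entry carries the SMMU's own
	 * device ID rather than an ID range, so anything else is
	 * rejected below.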
	 */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
					void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device
							    *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{
	int err = 0;

	if (dev->bus && !device_iommu_mapped(dev))
		err = iommu_probe_device(dev);

	return err;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of MSI reserved regions on success (0 if the platform
 *          doesn't require the reservation or no associated MSI regions),
 *          appropriate error value otherwise. The ITS interrupt translation
 *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
 *          are the MSI reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i, resv = 0;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return 0;

	/*
	 * Current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					    fwspec->ids[i],
					    NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return 0;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI);
			if (region) {
				list_add_tail(&region->list, head);
				resv++;
			}
		}
	}

	return (resv == its->its_count) ? resv : -ENODEV;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
#endif

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, either the SMMU driver has not been
	 * probed yet or it is not built into the kernel; defer the IOMMU
	 * configuration in the former case and abort it in the latter.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
				-EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL << rc->memory_address_limit;

	return 0;
}

/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 mask, dmaaddr = 0, size = 0, offset = 0;
	int ret, msb;

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else
		size = 1ULL << 32;

	if (dev_is_pci(dev)) {
		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
		if (ret == -ENODEV)
			ret = rc_dma_get_range(dev, &size);
	} else {
		ret = nc_dma_get_range(dev, &size);
	}

	if (!ret) {
		msb = fls64(dmaaddr + size - 1);
		/*
		 * Round-up to the power-of-two mask or set
		 * the mask to the whole 64-bit address space
		 * in case the DMA region covers the full
		 * memory window.
		 */
		mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
		/*
		 * Limit coherent and dma mask based on size
		 * retrieved from firmware.
		 */
		dev->bus_dma_mask = mask;
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	dev->dma_pfn_offset = PFN_DOWN(offset);
	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}

/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
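 *
 * The walk performed here follows the IORT ID mappings from the root
 * complex or named component towards an SMMU; for PCI devices every DMA
 * alias is translated in turn (see iort_pci_iommu_init() above).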
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops;
	u32 streamid = 0;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		do {
			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);

			if (parent)
				err = iort_iommu_xlate(dev, parent, streamid);
		} while (parent && !err);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev);
		err = iort_add_device_replay(ops, dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation doesn't support unique irq
	 * lines. Use a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
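	 * All four GSIVs being identical is taken as the signature of
	 * the combined-line model; distinct values mean separate wired
	 * interrupts, so combined handling does not apply.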
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size for the Cavium ThunderX2 implementation,
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {

		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * Set the NUMA proximity domain for the SMMUv3 device.
 */
static void __init arm_smmu_v3_set_proximity(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
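	 *
	 * For example, a (hypothetical) SMMU node describing four context
	 * interrupts yields 4 + 2 = 6 resources: one MEM resource, one
	 * global fault IRQ and four context IRQs.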
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	bool (*dev_is_coherent)(struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				   struct acpi_iort_node *node);
	void (*dev_set_proximity)(struct device *dev,
				  struct acpi_iort_node *node);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_is_coherent = arm_smmu_v3_is_coherent,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_is_coherent = arm_smmu_is_coherent,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
		struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_platform_device() - Allocate a platform device for an IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 on failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity)
		ops->dev_set_proximity(&pdev->dev, node);

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r),
		    GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect an RC->SMMU mapping, make sure
			 * we enable ACS on the system.
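			 * ACS keeps peer-to-peer transactions from
			 * bypassing the SMMU translation, so any root
			 * complex mapped to an SMMU wants it enabled.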
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
			    (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of the IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}
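
/*
 * Usage sketch (illustrative only, not part of this file): acpi_iort_init()
 * is meant to run once during early ACPI bring-up, after which callers use
 * the per-device helpers above. The caller below is hypothetical:
 *
 *	void arch_acpi_boot_init(void)
 *	{
 *		acpi_iort_init();	// map the IORT table once
 *	}
 *
 *	// later, per device:
 *	u32 dev_id = iort_msi_map_rid(dev, requester_id);
 *	struct irq_domain *d = iort_get_device_domain(dev, dev_id);
 */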