/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}
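/*
 * The three helpers above implement a simple registry keyed by IORT node:
 * iort_init_platform_devices() registers a fwnode for each SMMU node it
 * creates a platform device for, and later lookups (see iort_iommu_xlate())
 * translate an IORT node back into the fwnode the IOMMU driver registered
 * its ops against. An illustrative (hypothetical) round trip:
 *
 *	fwnode = acpi_alloc_fwnode_static();
 *	iort_set_fwnode(node, fwnode);
 *	iort_get_fwnode(node) == fwnode;	(lookup succeeds)
 *	iort_delete_fwnode(node);
 *	iort_get_fwnode(node) == NULL;		(lookup now fails)
 *	acpi_free_fwnode_static(fwnode);
 */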
typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 *				  to the list from where we can get it back
 *				  later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
	return AE_OK;
}

bool iort_node_match(u8 type)
{
	struct acpi_iort_node *node;

	node = iort_scan_node(type, iort_match_type_callback, NULL);

	return node != NULL;
}
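/*
 * Device-to-node matching below relies on two different keys: named
 * components are matched by comparing the IORT device object name against
 * the device's full ACPI namespace path, while PCI root complexes are
 * matched on the PCI segment (domain) number alone. With hypothetical
 * values, the two cases look like:
 *
 *	named component:  device_name "\_SB.MDEV" == acpi_get_name(adev)
 *	root complex:     pci_segment_number 0   == pci_domain_nr(bus)
 */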
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev) {
			status = AE_NOT_FOUND;
			goto out;
		}

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	} else {
		status = AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}
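/*
 * A worked example of the range translation above, with illustrative
 * values (not taken from any real table): a mapping entry with
 * input_base = 0x0000, id_count = 0x100 and output_base = 0x8000 accepts
 * RIDs 0x0000-0x00ff, so rid_in = 0x0042 falls in range and is translated
 * to rid_out = 0x8000 + (0x0042 - 0x0000) = 0x8042, while rid_in = 0x0100
 * is out of range and iort_id_map() returns -ENXIO so the caller can try
 * the next mapping entry.
 */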
static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					u32 *id_out, u8 type_mask,
					int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	    index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		return NULL;

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
						u32 rid_in, u32 *rid_out,
						u8 type_mask)
{
	u32 rid = rid_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (rid_out)
				*rid_out = rid;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the RID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, rid, &rid))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input RID to output RID unchanged on mapping failure */
	if (rid_out)
		*rid_out = rid_in;

	return NULL;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map a MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns available entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}
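/*
 * The lookups above chain together for MSI routing: the requester ID is
 * walked through the ID mappings to an ITS group node, the ITS identifier
 * picked from that group is turned into the domain token the irqchip
 * driver registered via iort_register_domain_token(), and the token
 * finally resolves to an irq_domain. An illustrative (hypothetical) flow:
 *
 *	req_id 0x42 -> ITS group node -> its_id 0 -> fwnode token
 *		    -> irq_find_matching_fwnode(token, DOMAIN_BUS_PCI_MSI)
 */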
/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
						struct acpi_iort_node *node,
						u32 streamid)
{
	const struct iommu_ops *ops = NULL;
	int ret = -ENODEV;
	struct fwnode_handle *iort_fwnode;

	if (node) {
		iort_fwnode = iort_get_fwnode(node);
		if (!iort_fwnode)
			return NULL;

		ops = iommu_ops_from_fwnode(iort_fwnode);
		if (!ops)
			return NULL;

		ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
	}

	return ret ? NULL : ops;
}

/**
 * iort_set_dma_mask - Set-up dma mask for a device.
 *
 * @dev: device to configure
 */
void iort_set_dma_mask(struct device *dev)
{
	/*
	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
	 * set up the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
}
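/*
 * The xlate helpers above mirror the DT "iommus" translation: for each
 * (SMMU node, stream ID) pair derived from the IORT ID mappings,
 * iommu_fwspec_init() binds the device to the IOMMU instance identified
 * by its fwnode and iommu_fwspec_add_ids() appends the stream ID. With a
 * hypothetical stream ID:
 *
 *	arm_smmu_iort_xlate(dev, 0x8042, smmu_fwnode, smmu_ops);
 *
 * leaves the device's iommu_fwspec holding one ID, 0x8042, for the SMMU
 * driver to consume at attach time.
 */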
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops = NULL;
	u32 streamid = 0;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		u32 rid;

		pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
				       &rid);

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		parent = iort_node_map_rid(node, rid, &streamid,
					   IORT_IOMMU_TYPE);

		ops = iort_iommu_xlate(dev, parent, streamid);

	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		parent = iort_node_get_id(node, &streamid,
					  IORT_IOMMU_TYPE, i++);

		while (parent) {
			ops = iort_iommu_xlate(dev, parent, streamid);

			parent = iort_node_get_id(node, &streamid,
						  IORT_IOMMU_TYPE, i++);
		}
	}

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + SZ_128K - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;

	if (smmu->event_gsiv)
		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->pri_gsiv)
		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->gerr_gsiv)
		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->sync_gsiv)
		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
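/*
 * Note the count/init pairing: arm_smmu_v3_count_resources() and
 * arm_smmu_v3_init_resources() must agree on which interrupts are present,
 * since iort_add_smmu_platform_device() sizes the resource array from the
 * former and the latter fills it. A hypothetical SMMUv3 node with only
 * event_gsiv and gerr_gsiv wired would yield three resources:
 *
 *	res[0]	IORESOURCE_MEM	128K register frame
 *	res[1]	IORESOURCE_IRQ	"eventq"
 *	res[2]	IORESOURCE_IRQ	"gerror"
 */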
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}
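/*
 * iort_add_smmu_platform_device() below stashes the IORT node pointer in
 * the platform data, so an SMMU driver probe routine could (hypothetically)
 * recover it with something like:
 *
 *	struct acpi_iort_node *node =
 *		*(struct acpi_iort_node **)dev_get_platdata(&pdev->dev);
 *
 * platform_device_add_data() copies the pointer value itself (sizeof(node)
 * bytes), not the node it points at; the IORT table stays mapped for the
 * lifetime of the system, so the pointer remains valid.
 */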
/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMUs set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			     DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
		    (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();

	acpi_probe_device_table(iort);
}