/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);
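
/*
 * The IORT models the I/O topology as a set of nodes: endpoint nodes
 * (PCI root complexes and named components) carry ID mappings that
 * translate their input IDs (e.g. PCI requester IDs) into output IDs
 * understood by a parent node, typically an SMMU (stream IDs) or an
 * ITS group (MSI device IDs). The helpers below walk those mappings.
 */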

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 * to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found in the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
	return AE_OK;
}

bool iort_node_match(u8 type)
{
	struct acpi_iort_node *node;

	node = iort_scan_node(type, iort_match_type_callback, NULL);

	return node != NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

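	/*
	 * Named components are matched on the full ACPI namespace path of
	 * their device object; PCI root complexes are matched on the PCI
	 * segment number below.
	 */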
	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * to root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	    index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

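		/*
		 * Scan this node's array of ID mappings: the first entry
		 * that translates the current ID also identifies the
		 * parent node to continue the walk from.
		 */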
		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static
struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
						 u32 *id_out, u8 type_mask,
						 int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the initial dev id's parent is not the
	 * target type we want, map it again, to cover use cases such
	 * as NC (named component) -> SMMU -> ITS. If the type matches,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device id was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	for (i = 0; i < node->mapping_count; i++) {
		if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
			return 0;
	}

	return -ENODEV;
}

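/*
 * The helpers below resolve the MSI irqdomain for a device: the ID
 * mappings are walked up to the ITS group node, an ITS identifier is
 * read from that node, and the domain token registered by the ITS
 * driver via iort_register_domain_token() is used to look up the
 * matching irqdomain.
 */
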
/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

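/*
 * Without CONFIG_IOMMU_API there is no fwspec to interrogate and no
 * IOMMU driver to replay add_device() against, so the two helpers
 * below collapse to no-ops.
 */
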
#ifdef CONFIG_IOMMU_API
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
	int err = 0;

	if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
	    !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{ return NULL; }
static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif

static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
						struct acpi_iort_node *node,
						u32 streamid)
{
	const struct iommu_ops *ops = NULL;
	int ret = -ENODEV;
	struct fwnode_handle *iort_fwnode;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (node) {
		iort_fwnode = iort_get_fwnode(node);
		if (!iort_fwnode)
			return NULL;

		ops = iommu_ops_from_fwnode(iort_fwnode);
		/*
		 * If the ops look-up fails, either the SMMU driver has
		 * not been probed yet or it is not built into the
		 * kernel. If it is built-in, defer the IOMMU
		 * configuration so probing can be retried; otherwise
		 * just abort it.
		 */
		if (!ops)
			return iort_iommu_driver_enabled(node->type) ?
			       ERR_PTR(-EPROBE_DEFER) : NULL;

		ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
	}

	return ret ? NULL : ops;
}

/**
 * iort_set_dma_mask - Set-up dma mask for a device.
 *
 * @dev: device to configure
 */
void iort_set_dma_mask(struct device *dev)
{
	/*
	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
	 * setup the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
}

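/*
 * PCI devices are translated once, using the requester ID of their DMA
 * alias; named components may carry several mappings, so each one is
 * translated and added to the device's fwspec in turn.
 */
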
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops = NULL;
	u32 streamid = 0;
	int err;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		u32 rid;

		pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
				       &rid);

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		parent = iort_node_map_id(node, rid, &streamid,
					  IORT_IOMMU_TYPE);

		ops = iort_iommu_xlate(dev, parent, streamid);

	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE, i++);

		while (parent) {
			ops = iort_iommu_xlate(dev, parent, streamid);
			if (IS_ERR_OR_NULL(ops))
				return ops;

			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);
		}
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	err = iort_add_device_replay(ops, dev);
	if (err)
		ops = ERR_PTR(err);

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + SZ_128K - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;

	if (smmu->event_gsiv)
		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->pri_gsiv)
		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->gerr_gsiv)
		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);

	if (smmu->sync_gsiv)
		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
				       ACPI_EDGE_SENSITIVE,
				       &res[num_res++]);
}

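/*
 * Whether an SMMU's page table walker is cache coherent is described by
 * firmware (the SMMUv3 COHACC override flag or the SMMUv1/v2 coherent
 * walk flag); the result is turned into a DMA attribute when the SMMU
 * platform device is created below.
 */
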
static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

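/*
 * SMMUs found in the IORT are not described as devices in the ACPI
 * namespace, so a platform device is fabricated for each of them here;
 * the IORT node pointer is handed over as platform data so the SMMU
 * driver can retrieve its configuration.
 */
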
/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 on failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of the IORT table,
	 * but have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
			(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}