/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mappings
 * reported to the OS through firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
#endif

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}
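
/*
 * Illustrative sketch (not compiled): the three helpers above are meant to
 * be used as a set.  iort_init_platform_devices() below registers a static
 * fwnode for every SMMU node, iort_get_fwnode() retrieves it when devices
 * are later configured, and the entry is removed again if platform device
 * creation fails.  The helper name here is hypothetical and only shows the
 * intended pairing of calls:
 *
 *	static int example_register_smmu(struct acpi_iort_node *node)
 *	{
 *		struct fwnode_handle *fwnode = acpi_alloc_fwnode_static();
 *
 *		if (!fwnode)
 *			return -ENOMEM;
 *
 *		iort_set_fwnode(node, fwnode);
 *		if (iort_add_smmu_platform_device(node)) {
 *			iort_delete_fwnode(node);
 *			acpi_free_fwnode_static(fwnode);
 *			return -ENODEV;
 *		}
 *		return 0;
 *	}
 */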

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token and related ITS ID
 * to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
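
/*
 * Usage note: iort_scan_node() is a linear walk over all nodes of a given
 * type, with the callback deciding whether a node matches.  The rest of
 * this file pairs it with iort_match_node_callback() (below); for example,
 * the root complex node of a PCI device is found with:
 *
 *	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
 *			      iort_match_node_callback, &pbus->dev);
 *
 * where pbus is the device's PCI root bus (see iort_find_dev_node()).
 */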

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * to root complexes, i.e. each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* Single mapping does not care for input id */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}

static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
	    index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static
struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
						 u32 *id_out, u8 type_mask,
						 int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * optional step 2: if the parent found in step 1 is not of the
	 * target type we want, map the initial dev id again, e.g. for use
	 * cases such as NC (named component) -> SMMU -> ITS. If the type
	 * already matches, return the initial dev id and its parent
	 * pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device ID was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	for (i = 0; i < node->mapping_count; i++) {
		if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
			return 0;
	}

	return -ENODEV;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns available entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
					void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}
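
/*
 * Sketch of how the helpers above feed the IOMMU configuration code below
 * (device and numbers are illustrative only): iort_iommu_configure()
 * resolves the SMMU parent node and stream ID for a device and records
 * them through arm_smmu_iort_xlate(), roughly:
 *
 *	PCI dev 0000:00:01.0, requester ID 0x8
 *	  -> root complex node -> ID mapping -> SMMUv3 node, streamid 0x8
 *	  -> iommu_fwspec_init(dev, smmu_fwnode, ops)
 *	  -> iommu_fwspec_add_ids(dev, &streamid, 1)
 *
 * The SMMU driver then consumes the recorded stream IDs from
 * dev->iommu_fwspec in its add_device() callback.
 */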

#ifdef CONFIG_IOMMU_API
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
	int err = 0;

	if (ops->add_device && dev->bus && !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{ return NULL; }
static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, it means that either the SMMU
	 * driver has not probed yet or that the SMMU driver is not
	 * built into the kernel; defer the IOMMU configuration in
	 * the former case (the driver may still probe later) and
	 * abort it in the latter.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
			-EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}

/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 mask, dmaaddr = 0, size = 0, offset = 0;
	int ret, msb;

	/*
	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
	 * setup the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);

	if (dev_is_pci(dev))
		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
	else
		ret = nc_dma_get_range(dev, &size);

	if (!ret) {
		msb = fls64(dmaaddr + size - 1);
		/*
		 * Round-up to the power-of-two mask or set
		 * the mask to the whole 64-bit address space
		 * in case the DMA region covers the full
		 * memory window.
		 */
		mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
		/*
		 * Limit coherent and dma mask based on size
		 * retrieved from firmware.
		 */
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	dev->dma_pfn_offset = PFN_DOWN(offset);
	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}

/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops;
	u32 streamid = 0;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		do {
			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);

			if (parent)
				err = iort_iommu_xlate(dev, parent, streamid);
		} while (parent && !err);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
		err = iort_add_device_replay(ops, dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation doesn't support unique irq
	 * lines; it uses a single irq line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size, for the Cavium ThunderX2 implementation
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {

		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID)
/*
 * set numa proximity domain for smmuv3 device
 */
static void __init arm_smmu_v3_set_proximity(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
	void (*iommu_set_proximity)(struct device *dev,
				    struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources,
	.iommu_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->iommu_set_proximity)
		ops->iommu_set_proximity(&pdev->dev, node);

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				return true;
			}
		}
	}

	return false;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	bool acs_enabled = false;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if (!acs_enabled)
			acs_enabled = iort_enable_acs(iort_node);

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
			(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}
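
/*
 * Assumed call flow (the call sites live outside this file and are only
 * summarized here): acpi_iort_init() runs once during ACPI initialization
 * to map the table and create the SMMU platform devices; later, during
 * device enumeration, acpi_dma_configure() invokes iort_dma_setup() and
 * iort_iommu_configure() for each device, while the MSI layer uses
 * iort_msi_map_rid()/iort_get_device_domain() (PCI) or
 * acpi_configure_pmsi_domain() (platform devices) to bind the device to
 * its ITS-backed MSI domain.
 */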