// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_IW			0x04
#define IVMD_FLAG_IR			0x02
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device
 * table will be copied. It is only used by the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;
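/*
 * Example of how the shared tables above work together: a lookup for a
 * device first resolves its alias, then finds the responsible IOMMU.
 * A minimal sketch of the pattern used elsewhere in the driver (the
 * helper below is illustrative only, not defined in this file):
 *
 *	static struct amd_iommu *iommu_for_devid(u16 devid)
 *	{
 *		u16 alias = amd_iommu_alias_table[devid];
 *
 *		return amd_iommu_rlookup_table[alias];
 *	}
 */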
/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

static u32 amd_iommu_ivinfo __initdata;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}
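/*
 * Worked example for tbl_size(): with amd_iommu_last_bdf == 0xffff and a
 * 32-byte device table entry, (0xffff + 1) * 32 is exactly 2 MiB, so
 * tbl_size(32) returns 1UL << (PAGE_SHIFT + get_order(2 MiB)) == 2 MiB.
 * Because the size is rounded up to a power-of-two page order, the
 * shared tables can be somewhat larger than strictly required.
 */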
/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
		iommu->features = h->efr_reg;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
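/*
 * Example of the encoding used by iommu_set_device_table() above: the low
 * bits of the Device Table Base Address Register hold the table length in
 * 4K pages minus one. A 2 MiB device table yields (0x200000 >> 12) - 1 ==
 * 0x1ff, so the register value is the physical base address of the table
 * OR'ed with 0x1ff.
 */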
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
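/*
 * Example for ivhd_entry_length(): the two top bits of a 4/8/16/32-byte
 * entry type select the stride as 0x04 << (type >> 6). IVHD_DEV_SELECT
 * (0x02) is therefore a 4-byte entry, while IVHD_DEV_ALIAS (0x42) is an
 * 8-byte entry. Only IVHD_DEV_ACPI_HID has a variable length, derived
 * from the uid length byte at offset 21.
 */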
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and also
 * perform basic hardware initialization.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}
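/*
 * The command buffer allocated above is consumed by the IOMMU as a ring:
 * the driver copies a command to the tail and bumps the tail pointer,
 * while the hardware fetches from the head. A minimal sketch of the
 * producer side (the real queuing code lives in iommu.c and also handles
 * a full ring):
 *
 *	memcpy(iommu->cmd_buf + iommu->cmd_buf_tail, cmd, sizeof(*cmd));
 *	iommu->cmd_buf_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) %
 *			      CMD_BUFFER_SIZE;
 *	writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 */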
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ? 0 : -ENOMEM;
}
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 *       Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}
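/*
 * Example of how cmd_sem is used: a COMPLETION_WAIT command instructs the
 * IOMMU to store a fresh value to this page, and the driver then polls for
 * it, which proves that all previously queued commands have completed. A
 * rough sketch of the idea (the real wait loop in iommu.c also applies a
 * timeout):
 *
 *	u64 data = ++iommu->cmd_sem_val;
 *	// ...queue a COMPLETION_WAIT storing 'data' to iommu->cmd_sem...
 *	while (READ_ONCE(*iommu->cmd_sem) != data)
 *		cpu_relax();
 */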
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
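/*
 * Example for the helpers above: a device table entry is 256 bits wide,
 * stored as four u64 words. A bit number of 96 selects word 96 >> 6 == 1
 * and bit 96 & 0x3f == 32 within it, i.e. data[1] bit 32. The DEV_ENTRY_*
 * constants encode positions in exactly this flat bit numbering.
 */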
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
			       iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
			       iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask), so we must remove the mask
	 * to obtain the true physical address in the kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}
static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}
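/*
 * Example: the early maps consumed above are filled from kernel
 * command-line overrides such as
 *
 *	ivrs_ioapic[4]=00:14.0 ivrs_hpet[0]=00:14.0
 *	ivrs_acpihid[00:00.2]=AMDI0020:00
 *
 * which pin an IOAPIC, HPET or ACPI HID device to a requestor ID when the
 * IVRS table got it wrong (see kernel-parameters.txt for the exact
 * syntax).
 */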
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;


	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}
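/*
 * Example of the device id format used throughout the parser above: the
 * 16-bit IVRS devid packs PCI bus/device/function as bus[15:8] dev[7:3]
 * fn[2:0], so devid 0x1405 decodes via PCI_BUS_NUM()/PCI_SLOT()/PCI_FUNC()
 * as 14:00.5.
 */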
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}
/*
 * This function glues the initialization functions for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}
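/*
 * Example: feature checks such as iommu_feature(iommu, FEATURE_GT) used
 * above reduce to a mask test on the cached EFR value, roughly
 * !!(iommu->features & mask) (see the driver header for the real helper).
 * iommu->features is filled either early from the IVHD or late from the
 * MMIO EFR register in iommu_init_pci().
 */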
/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks in the IVRS table and
 * returns the highest supported IVHD type it finds.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
		(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0, save_reg = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* save the value to restore, if writable */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
		goto pc_false;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2))
		goto pc_false;

	/* restore */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
		goto pc_false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

	return;

pc_false:
	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
	amd_iommu_pc_present = false;
	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
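/*
 * Example: the attribute group above appears under the iommu class device
 * registered in iommu_init_pci(), so user space can read e.g.
 *
 *	$ cat /sys/class/iommu/ivhd0/amd-iommu/features
 *	19a77efa2254
 *
 * (the value shown is purely illustrative).
 */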
/*
 * Note: IVHD 0x11 and 0x40 also contain an exact copy of
 * the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{
	u64 features;

	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
		return;

	/* read extended feature bits */
	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (!iommu->features) {
		iommu->features = features;
		return;
	}

	/*
	 * Sanity check and warn if EFR values from
	 * IVHD and MMIO conflict.
	 */
	if (features != iommu->features)
		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
			features, iommu->features);
}

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	late_iommu_features_init(iommu);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}
static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):",
				 iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	return 0;
}

union intcapxt {
	u64	capxt;
	struct {
		u64	reserved_0		:  2,
			dest_mode_logical	:  1,
			reserved_1		:  5,
			destid_0_23		: 24,
			vector			:  8,
			reserved_2		: 16,
			destid_24_31		:  8;
	};
} __attribute__ ((packed));

/*
 * There isn't really any need to mask/unmask at the irqchip level because
 * the 64-bit INTCAPXT registers can be updated atomically without tearing
 * when the affinity is being updated.
 */
static void intcapxt_unmask_irq(struct irq_data *data)
{
}

static void intcapxt_mask_irq(struct irq_data *data)
{
}

static struct irq_chip intcapxt_controller;

static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{
	struct amd_iommu *iommu = irqd->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irqd);
	union intcapxt xt;

	xt.capxt = 0ULL;
	xt.dest_mode_logical = apic->dest_mode_logical;
	xt.vector = cfg->vector;
	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
	xt.destid_24_31 = cfg->dest_apicid >> 24;

	/*
	 * The current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
	return 0;
}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{
	intcapxt_mask_irq(irqd);
}


static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int i, ret;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = virq; i < virq + nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);

		irqd->chip = &intcapxt_controller;
		irqd->chip_data = info->data;
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
	}

	return ret;
}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irqd->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
}
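/*
 * Example for the INTCAPXT layout used in intcapxt_irqdomain_activate()
 * above: an x2APIC destination ID of 0x12345678 is split into
 * destid_0_23 = 0x345678 (via GENMASK(23, 0)) and destid_24_31 = 0x12
 * (via the shift by 24), supporting 32-bit destination IDs that plain
 * MSI cannot address.
 */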
/*
 * There isn't really any need to mask/unmask at the irqchip level because
 * the 64-bit INTCAPXT registers can be updated atomically without tearing
 * when the affinity is being updated.
 */
static void intcapxt_unmask_irq(struct irq_data *data)
{
}

static void intcapxt_mask_irq(struct irq_data *data)
{
}

static struct irq_chip intcapxt_controller;

static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{
	struct amd_iommu *iommu = irqd->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irqd);
	union intcapxt xt;

	xt.capxt = 0ULL;
	xt.dest_mode_logical = apic->dest_mode_logical;
	xt.vector = cfg->vector;
	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
	xt.destid_24_31 = cfg->dest_apicid >> 24;

	/*
	 * The current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
	return 0;
}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{
	intcapxt_mask_irq(irqd);
}

static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	int i, ret;

	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = virq; i < virq + nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);

		irqd->chip = &intcapxt_controller;
		irqd->chip_data = info->data;
		__irq_set_handler(i, handle_edge_irq, 0, "edge");
	}

	return ret;
}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irqd->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
}

static struct irq_chip intcapxt_controller = {
	.name			= "IOMMU-MSI",
	.irq_unmask		= intcapxt_unmask_irq,
	.irq_mask		= intcapxt_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_affinity	= intcapxt_set_affinity,
	.flags			= IRQCHIP_SKIP_SET_WAKE,
};

static const struct irq_domain_ops intcapxt_domain_ops = {
	.alloc		= intcapxt_irqdomain_alloc,
	.free		= intcapxt_irqdomain_free,
	.activate	= intcapxt_irqdomain_activate,
	.deactivate	= intcapxt_irqdomain_deactivate,
};

static struct irq_domain *iommu_irqdomain;

static struct irq_domain *iommu_get_irqdomain(void)
{
	struct fwnode_handle *fn;

	/* No need for locking here (yet) as the init is single-threaded */
	if (iommu_irqdomain)
		return iommu_irqdomain;

	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
	if (!fn)
		return NULL;

	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
						      fn, &intcapxt_domain_ops,
						      NULL);
	if (!iommu_irqdomain)
		irq_domain_free_fwnode(fn);

	return iommu_irqdomain;
}

static int iommu_setup_intcapxt(struct amd_iommu *iommu)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;
	int irq, ret;

	domain = iommu_get_irqdomain();
	if (!domain)
		return -ENXIO;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
	info.data = iommu;

	irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
		return ret;
	}

	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return 0;
}

static int iommu_init_irq(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		ret = iommu_setup_intcapxt(iommu);
	else if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

	iommu->int_enabled = true;
enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;
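	/*
	 * The shift above drops IVMD_FLAG_UNITY_MAP (bit 0), so e->prot
	 * carries IVMD_FLAG_IR as bit 0 and IVMD_FLAG_IW as bit 1; a
	 * read+write entry thus ends up with e->prot == 0x3.
	 */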
	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * because some buggy BIOSes overwrite the exclusion range members
	 * (exclusion_start and exclusion_length) when multiple exclusion
	 * ranges (IVMD entries) are defined in the ACPI table.
	 */
	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries
 * to copy the old content of the device table entries. If that is not the
 * case, or if the copy failed, it just continues as a normal kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * If we get here because copying the device table from the
		 * old kernel failed while all IOMMUs were pre-enabled, print
		 * an error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
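/*
 * The 16-bit IVRS device id packs bus:dev.fn as bus[15:8], dev[7:3] and
 * fn[2:0]; the southbridge IOAPIC at 00:14.0 above therefore encodes to
 * devid 0x00a0.
 */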
static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us with the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}

static void __init ivinfo_init(void *ivrs)
{
	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * four times:
 *
 *	1st pass) Discover the most comprehensive IVHD type to use.
 *
 *	2nd pass) Find the highest PCI device id the driver has to handle.
 *		  Based on this information the sizes of the data structures
 *		  that need to be allocated are determined.
 *
 *	3rd pass) Initialize the data structures just allocated with the
 *		  information in the ACPI table about available AMD IOMMUs
 *		  in the system. It also maps the PCI devices in the
 *		  system to specific IOMMUs.
 *
 *	4th pass) After the basic data structures are allocated and
 *		  initialized we update them with information about memory
 *		  remapping requirements parsed out of the ACPI table in
 *		  this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
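/*
 * Rough sizing illustration (assuming the 32-byte device table entries and
 * up to 2^16 device ids used elsewhere in this driver): a fully populated
 * device table is 65536 * 32 bytes = 2 MiB, which is why the tables below
 * are allocated with __get_free_pages() rather than kmalloc().
 */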
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;
	u32 pci_id;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	ivinfo_init(ivrs_base);

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable IOMMU if there's Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			amd_iommu_disabled = true;
			break;
		}
	}

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
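		/*
		 * For scale (assuming MAX_IRQS_PER_TABLE is 256 as defined
		 * in amd_iommu_types.h): legacy 32-bit IRTEs need
		 * 256 * 4 = 1 KiB per table, while the 128-bit IRTEs used
		 * in GA mode need 256 * 16 = 4 KiB.
		 */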
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_irq(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
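/*
 * Normal forward progression, as implemented in state_next() below:
 *
 *	IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *	IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *	IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states.
 */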
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It simply checks whether an IVRS ACPI table is present to detect
 * the AMD IOMMUs in the system.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
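/*
 * Example usage, matching the sscanf() formats above:
 *
 *	ivrs_ioapic[4]=00:14.0		map IOAPIC id 4 to devid 00:14.0
 *	ivrs_hpet[0]=00:14.0		map HPET id 0 to devid 00:14.0
 */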
static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line	= true;

	return 1;
}

__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support. This code allows access
 * to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
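	/*
	 * E.g. bank 1, counter 2, function 0x10 yields
	 * offset = ((0x40 | 1) << 12) | (2 << 8) | 0x10 = 0x41210.
	 */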
	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);
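/*
 * Illustrative usage sketch (not part of the driver): reading the 48-bit
 * event counter in bank 0, counter 0 of the first IOMMU. The fxn argument
 * selects a register inside the counter block and must be 8-byte aligned
 * and <= 0x28, as enforced in iommu_pc_get_set_reg() above; fxn 0 is
 * assumed here to address the counter value register.
 *
 *	u64 count;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &count))
 *		pr_info("IOMMU PC bank 0/counter 0: %llu\n", count);
 */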