// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));
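/*
 * Not every ivhd_entry field is meaningful for every entry type: the hidh,
 * cid and uid* fields are only consumed for IVHD_DEV_ACPI_HID entries, while
 * ext is re-purposed per entry type (alias target, extended flags or the
 * special-device encoding) - see init_iommu_from_acpi() below.
 */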
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id and contains information
 * about the domain the device belongs to as well as the page table root
 * pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * Pointer to a device table to which the content of the old device table
 * will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */
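/*
 * As the accessors below show, the register index (plus the l1 block number
 * for the l1 space) is written to PCI config offset 0xf8 (l1) or 0xf0 (l2),
 * and the data is then transferred through offset 0xfc (l1) or 0xf4 (l2).
 * A write-enable bit (bit 31 for l1, bit 8 for l2) must be set in the index
 * register before storing data.
 */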
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
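/*
 * Note on the encoding above: the low bits of the device table base address
 * register hold the table size in 4K pages minus one, which is what the
 * (dev_table_size >> 12) - 1 OR'ed into the base address expresses.
 */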
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Cannot reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether a higher device id is defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}
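/*
 * The command buffer is used as a ring buffer: the driver inserts commands
 * at the tail and the IOMMU fetches them from the head, with both positions
 * mirrored in the MMIO head/tail registers manipulated below.
 */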
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will write PPR requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}
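/*
 * PPR stands for Peripheral Page Request: ATS-capable devices can ask the
 * OS to make pages resident. The log allocated above is where the IOMMU
 * deposits these requests; its head and tail registers are handled like
 * those of the event log.
 */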
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}
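/*
 * cmd_sem is the page that COMPLETION_WAIT commands write their semaphore
 * value to ("completion wait write-back", hence cwwb). When SNP is active,
 * iommu_set_cwwb_range() additionally reports this page to the hardware via
 * the repurposed exclusion base/limit registers.
 */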
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* Sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}


static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
			       iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
			       iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask), which must be removed to
	 * obtain the true physical address in the kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
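	/*
	 * If SME was active in the first kernel, the old device table was
	 * written through an encrypted mapping, so it has to be mapped
	 * encrypted here as well; otherwise a plain write-back memremap()
	 * is sufficient.
	 */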
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag for devid: %#x\n",
				       devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}
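/*
 * Erratum 63: for devices whose DTE reports SYSMGT = 01b, the IW (I/O write
 * permission) bit must also be set; the helper below applies exactly that.
 */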
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;


	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;

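	/*
	 * Range entries come in pairs: a *_RANGE_START (or DEV_ALIAS_RANGE)
	 * entry stashes devid_start, flags, ext_flags and the alias target
	 * in the locals above, and the matching IVHD_DEV_RANGE_END entry
	 * then applies them to every device id in the range.
	 */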
	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

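			/*
			 * For IVHD_DEV_SPECIAL entries the ext field packs
			 * the handle (bits 7:0), the source devid (bits 23:8)
			 * and the variety, IOAPIC or HPET (bits 31:24).
			 */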
			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization functions for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		/*
		 * Note: Since iommu_update_intcapxt() leverages
		 * the IOMMU MMIO access to MSI capability block registers
		 * for MSI address lo/hi/data, we need to check both
		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
		 */
		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks and returns the highest
 * supported IVHD type.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
				   (base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0, save_reg = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* save the value to restore, if writable */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
		goto pc_false;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2))
		goto pc_false;

	/* restore */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
		goto pc_false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

	return;

pc_false:
	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
	amd_iommu_pc_present = false;
	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):",
				 iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}
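/*
 * Layout of the 64-bit IntCapXT interrupt routing registers. The bitfields
 * are wrapped in an anonymous struct so that they overlay capxt as a whole
 * instead of each bitfield starting at bit 0 of the union.
 */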
union intcapxt {
	u64	capxt;
	struct {
		u64	reserved_0		:  2,
			dest_mode_logical	:  1,
			reserved_1		:  5,
			destid_0_23		: 24,
			vector			:  8,
			reserved_2		: 16,
			destid_24_31		:  8;
	};
} __attribute__ ((packed));

/*
 * Setup the IntCapXT registers with interrupt routing information
 * based on the PCI MSI capability block registers, accessed via
 * MMIO MSI address low/hi and MSI data registers.
 */
static void iommu_update_intcapxt(struct amd_iommu *iommu)
{
	struct msi_msg msg;
	union intcapxt xt;
	u32 destid;

	msg.address_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
	msg.address_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
	msg.data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);

	destid = x86_msi_msg_get_destid(&msg, x2apic_enabled());

	xt.capxt = 0ULL;
	xt.dest_mode_logical = msg.arch_data.dest_mode_logical;
	xt.vector = msg.arch_data.vector;
	xt.destid_0_23 = destid & GENMASK(23, 0);
	xt.destid_24_31 = destid >> 24;

	/*
	 * The current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}

static void _irq_notifier_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		if (iommu->dev->irq == notify->irq) {
			iommu_update_intcapxt(iommu);
			break;
		}
	}
}

static void _irq_notifier_release(struct kref *ref)
{
}

static int iommu_init_intcapxt(struct amd_iommu *iommu)
{
	int ret;
	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;

	/*
	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
	 * which can be inferred from amd_iommu_xt_mode.
	 */
	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
		return 0;

	/*
	 * Also, we need to setup the notifier to update the IntCapXT registers
	 * whenever the irq affinity is changed from user-space.
	 */
	notify->irq = iommu->dev->irq;
	notify->notify = _irq_notifier_notify;
	notify->release = _irq_notifier_release;
	ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
	if (ret) {
		pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
		       iommu->devid, iommu->dev->irq);
		return ret;
	}

	iommu_update_intcapxt(iommu);
	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return ret;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	ret = iommu_init_intcapxt(iommu);
	if (ret)
		return ret;

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
2094 * 2095 ****************************************************************************/ 2096 2097 static void __init free_unity_maps(void) 2098 { 2099 struct unity_map_entry *entry, *next; 2100 2101 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { 2102 list_del(&entry->list); 2103 kfree(entry); 2104 } 2105 } 2106 2107 /* called for unity map ACPI definition */ 2108 static int __init init_unity_map_range(struct ivmd_header *m) 2109 { 2110 struct unity_map_entry *e = NULL; 2111 char *s; 2112 2113 e = kzalloc(sizeof(*e), GFP_KERNEL); 2114 if (e == NULL) 2115 return -ENOMEM; 2116 2117 switch (m->type) { 2118 default: 2119 kfree(e); 2120 return 0; 2121 case ACPI_IVMD_TYPE: 2122 s = "IVMD_TYPEi\t\t\t"; 2123 e->devid_start = e->devid_end = m->devid; 2124 break; 2125 case ACPI_IVMD_TYPE_ALL: 2126 s = "IVMD_TYPE_ALL\t\t"; 2127 e->devid_start = 0; 2128 e->devid_end = amd_iommu_last_bdf; 2129 break; 2130 case ACPI_IVMD_TYPE_RANGE: 2131 s = "IVMD_TYPE_RANGE\t\t"; 2132 e->devid_start = m->devid; 2133 e->devid_end = m->aux; 2134 break; 2135 } 2136 e->address_start = PAGE_ALIGN(m->range_start); 2137 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); 2138 e->prot = m->flags >> 1; 2139 2140 /* 2141 * Treat per-device exclusion ranges as r/w unity-mapped regions 2142 * since some buggy BIOSes might lead to the overwritten exclusion 2143 * range (exclusion_start and exclusion_length members). This 2144 * happens when there are multiple exclusion ranges (IVMD entries) 2145 * defined in ACPI table. 2146 */ 2147 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2148 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; 2149 2150 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" 2151 " range_start: %016llx range_end: %016llx flags: %x\n", s, 2152 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 2153 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), 2154 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 2155 e->address_start, e->address_end, m->flags); 2156 2157 list_add_tail(&e->list, &amd_iommu_unity_map); 2158 2159 return 0; 2160 } 2161 2162 /* iterates over all memory definitions we find in the ACPI table */ 2163 static int __init init_memory_definitions(struct acpi_table_header *table) 2164 { 2165 u8 *p = (u8 *)table, *end = (u8 *)table; 2166 struct ivmd_header *m; 2167 2168 end += table->length; 2169 p += IVRS_HEADER_LENGTH; 2170 2171 while (p < end) { 2172 m = (struct ivmd_header *)p; 2173 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) 2174 init_unity_map_range(m); 2175 2176 p += m->length; 2177 } 2178 2179 return 0; 2180 } 2181 2182 /* 2183 * Init the device table to not allow DMA access for devices 2184 */ 2185 static void init_device_table_dma(void) 2186 { 2187 u32 devid; 2188 2189 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 2190 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 2191 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); 2192 } 2193 } 2194 2195 static void __init uninit_device_table_dma(void) 2196 { 2197 u32 devid; 2198 2199 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 2200 amd_iommu_dev_table[devid].data[0] = 0ULL; 2201 amd_iommu_dev_table[devid].data[1] = 0ULL; 2202 } 2203 } 2204 2205 static void init_device_table(void) 2206 { 2207 u32 devid; 2208 2209 if (!amd_iommu_irq_remap) 2210 return; 2211 2212 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) 2213 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); 2214 } 2215 2216 static void iommu_init_flags(struct amd_iommu *iommu) 2217 { 2218 iommu->acpi_flags & 

static void iommu_init_flags(struct amd_iommu *iommu)
{
	if (iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK)
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN);
	else
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	if (iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK)
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN);
	else
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	if (iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK)
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN);
	else
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	if (iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK)
		iommu_feature_enable(iommu, CONTROL_ISOC_EN);
	else
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

/*
 * In vAPIC mode iommu_enable_ga() sets both GAM_EN and GA_EN and uses
 * 128-bit IRTEs; legacy GA mode sets only GA_EN (also with 128-bit
 * IRTEs); in all other modes the 32-bit IRTE format is kept.
 */

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Also, if we are in a kdump kernel and the IOMMUs are all pre-enabled,
 * it tries to copy the old contents of the device table entries. If that
 * is not the case, or if the copy fails, it just continues as a normal
 * kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	if (!copy_device_table()) {
		/*
		 * We come here either on a normal boot or because copying
		 * the device table from the old kernel (with all IOMMUs
		 * pre-enabled) failed. In the latter case print an error
		 * message and try to free the allocated old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides the wrong device
		 * id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 * Pass 1) Discover the most comprehensive IVHD type to use.
 *
 * Pass 2) Find the highest PCI device id the driver has to handle.
 *         Based on this information the sizes of the data structures
 *         that need to be allocated are determined.
 *
 * Pass 3) Initialize the data structures just allocated with the
 *         information in the ACPI table about available AMD IOMMUs
 *         in the system. It also maps the PCI devices in the
 *         system to specific IOMMUs.
 *
 * Pass 4) After the basic data structures are allocated and
 *         initialized we update them with information about memory
 *         remapping requirements parsed out of the ACPI table in
 *         this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;
	u32 pci_id;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse the ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable the IOMMU if there is a Stoney Ridge graphics device */
	for (i = 0; i < 32; i++) {
		pci_id = read_pci_config(0, i, 0, 0);
		/* vendor id 0x1002 (ATI/AMD), device id 0x98e4 (Stoney Ridge) */
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			amd_iommu_disabled = true;
			break;
		}
	}

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 *
		 * Each table holds MAX_IRQS_PER_TABLE entries; legacy IRTEs
		 * are 32 bits wide while guest-virtual-APIC-mode IRTEs are
		 * 128 bits, which is what the remap_cache_sz computation
		 * below reflects.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

/*
 * On the success path state_next() advances through:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states; once one of them is reached no further state
 * transitions are attempted.
 */

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}
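
/*
 * Note: amd_iommu_detect() below is not called directly; it is wired
 * into the x86 boot code through the IOMMU_INIT_FINISH() declaration
 * further down, and on success it installs amd_iommu_init() as
 * x86_init.iommu.iommu_init.
 */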

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether an IVRS ACPI table is present in order
 * to detect AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

/*
 * Expected format (see the sscanf pattern below): ivrs_ioapic[ID]=BB:DD.F,
 * e.g. ivrs_ioapic[9]=00:14.0 maps IOAPIC id 9 to devid 0x00a0, i.e.
 * (0x00 << 8) | (0x14 << 3) | 0x0. ivrs_hpet uses the same format.
 */
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	/*
	 * Guard against overflowing the early map, mirroring the checks
	 * in parse_ivrs_ioapic() and parse_ivrs_hpet() above.
	 */
	if (early_acpihid_map_size == EARLY_MAP_SIZE) {
		pr_err("Early ACPIHID map overflow - ignoring ivrs_acpihid%s\n",
		       str);
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
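
/*
 * Register addressing used by iommu_pc_get_set_reg() above: the counter
 * bank space starts at MMIO offset 0x40000 (0x40 << 12), with one 4K
 * page per bank, a 0x100-byte stride per counter and the 8-byte aligned
 * function offset (fxn <= 0x28) selecting the individual register.
 * Counters are 48 bits wide, hence the GENMASK_ULL(47, 0) masking.
 */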
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);