// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the content of the old device
 * table will be copied. It is only used in the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
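
/*
 * Note on the helpers above: both indexed register spaces are reached
 * indirectly through a PCI config-space index/data register pair
 * (0xf8/0xfc for L1, 0xf0/0xf4 for L2). A write-enable bit (bit 31 for
 * L1, bit 8 for L2) must be set in the index register before the data
 * register may be written.
 */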
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!iommu_feature(iommu, FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/
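
/*
 * Fixed-size IVHD device entries encode their size in the two uppermost
 * bits of the type field (4 << (type >> 6)): e.g. type 0x02 (DEV_SELECT)
 * is a 4-byte entry while type 0x42 (DEV_ALIAS) is an 8-byte entry.
 * Entries with type >= 0x80 are variable-sized; the only one handled
 * below, IVHD_DEV_ACPI_HID, stores its UID length at byte offset 21.
 */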
/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}
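
/*
 * The command buffer is consumed as a ring: the driver advances the MMIO
 * tail pointer after queueing commands and the IOMMU advances the head
 * pointer as it fetches them. Resetting the ring below therefore just
 * means zeroing both pointers while the buffer is disabled.
 */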
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
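
/*
 * The PPR log is another ring buffer, used with devices that support
 * Peripheral Page Requests (PPR, via PCIe PRI): a device-reported page
 * fault is queued here so the fault can be serviced by software instead
 * of being treated as a hard translation error.
 */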
/* allocates the memory where the IOMMU will log the PPR requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					       get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */

static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 * Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}
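
/*
 * The cmd_sem page allocated below serves as the target of completion-
 * wait write-backs; when SNP is active it presumably must also be
 * registered with the hardware, which is what iommu_set_cwwb_range()
 * above does by re-purposing the exclusion base/limit registers.
 */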
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);

	return iommu->cmd_sem ? 0 : -ENOMEM;
}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
	if (iommu->cmd_sem)
		free_page((unsigned long)iommu->cmd_sem);
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}


static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
			       iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
			       iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove it to obtain
	 * the true physical address in the kdump kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
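
	/*
	 * If the old table lives in (SME-)encrypted memory it has to be
	 * mapped with ioremap_encrypted() to be read correctly; a plain
	 * write-back memremap() is enough otherwise.
	 */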
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						   get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;
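
	/* A mapping provided on the kernel command line wins over IVRS. */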
	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * since some buggy BIOSes define multiple exclusion ranges (IVMD
	 * entries) in the ACPI table, which would otherwise overwrite the
	 * single per-IOMMU exclusion range (the exclusion_start and
	 * exclusion_length members).
	 */
	m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
}
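
/*
 * Range entries in the parser below work as a small state machine: a
 * *_RANGE_START (or ALIAS/EXT_SELECT range) entry latches devid_start,
 * flags and ext_flags, and the following DEV_RANGE_END entry applies
 * them to every device id in between.
 */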
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;


	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid,    e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}
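
/*
 * Note that the amount of MMIO space mapped per IOMMU below depends on
 * whether the EFR bits advertise the performance-counter register block:
 * if they do, the mapping extends to MMIO_REG_END_OFFSET, otherwise it
 * stops at MMIO_CNTR_CONF_OFFSET.
 */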
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		/*
		 * Note: Since iommu_update_intcapxt() leverages
		 * the IOMMU MMIO access to MSI capability block registers
		 * for MSI address lo/hi/data, we need to check both
		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
		 */
		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks that describe the same
 * IOMMU as the first block and returns the highest supported IVHD type
 * found among them.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
				   (base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0, save_reg = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* save the value to restore, if writable */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
		goto pc_false;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2))
		goto pc_false;

	/* restore */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
		goto pc_false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

	return;

pc_false:
	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
	amd_iommu_pc_present = false;
	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):",
				 iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;

		/* Need to setup range after PCI init */
		iommu_set_cwwb_range(iommu);
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

#define XT_INT_DEST_MODE(x)	(((x) & 0x1ULL) << 2)
#define XT_INT_DEST_LO(x)	(((x) & 0xFFFFFFULL) << 8)
#define XT_INT_VEC(x)		(((x) & 0xFFULL) << 32)
#define XT_INT_DEST_HI(x)	((((x) >> 24) & 0xFFULL) << 56)
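
/*
 * Per the macros above, an IntCapXT register packs the MSI routing data
 * as: destination mode in bit 2, the low 24 destination ID bits at bits
 * 8-31, the vector at bits 32-39 and the high destination ID bits at
 * bits 56-63.
 */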
/*
 * Setup the IntCapXT registers with interrupt routing information
 * based on the PCI MSI capability block registers, accessed via
 * MMIO MSI address low/hi and MSI data registers.
 */
static void iommu_update_intcapxt(struct amd_iommu *iommu)
{
	u64 val;
	u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
	u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
	u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
	bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
	u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);

	if (x2apic_enabled())
		dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);

	val = XT_INT_VEC(data & 0xFF) |
	      XT_INT_DEST_MODE(dm) |
	      XT_INT_DEST_LO(dest) |
	      XT_INT_DEST_HI(dest);

	/*
	 * Current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}

static void _irq_notifier_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		if (iommu->dev->irq == notify->irq) {
			iommu_update_intcapxt(iommu);
			break;
		}
	}
}

static void _irq_notifier_release(struct kref *ref)
{
}

static int iommu_init_intcapxt(struct amd_iommu *iommu)
{
	int ret;
	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;

	/*
	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
	 * which can be inferred from amd_iommu_xt_mode.
	 */
	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
		return 0;

	/*
	 * Also, we need to setup notifier to update the IntCapXT registers
	 * whenever the irq affinity is changed from user-space.
	 */
	notify->irq = iommu->dev->irq;
	notify->notify = _irq_notifier_notify;
	notify->release = _irq_notifier_release;
	ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
	if (ret) {
		pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
		       iommu->devid, iommu->dev->irq);
		return ret;
	}

	iommu_update_intcapxt(iommu);
	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return ret;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	ret = iommu_init_intcapxt(iommu);
	if (ret)
		return ret;

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definitions */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		init_exclusion_range(m);

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
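
/*
 * Address-range arithmetic in init_unity_map_range() above, with
 * hypothetical numbers and the usual 4 KiB page size: an IVMD with
 * range_start = 0xfed00100 and range_length = 0x400 produces
 *
 *   address_start = PAGE_ALIGN(0xfed00100)         = 0xfed01000
 *   address_end   = 0xfed01000 + PAGE_ALIGN(0x400) = 0xfed02000
 *
 * because PAGE_ALIGN() rounds its argument up to the next page boundary.
 */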

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		fallthrough;
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}

static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}
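
/*
 * A worked example for iommu_init_flags() above (hypothetical IVHD
 * contents): if the firmware reports
 * acpi_flags = IVHD_FLAG_PASSPW_EN_MASK | IVHD_FLAG_ISOC_EN_MASK (0x0a),
 * then CONTROL_PASSPW_EN and CONTROL_ISOC_EN are enabled while
 * CONTROL_HT_TUN_EN and CONTROL_RESPASSPW_EN are explicitly disabled;
 * CONTROL_COHERENT_EN is always enabled regardless of the IVHD flags.
 */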

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if we are in a kdump kernel and the IOMMUs are all pre-enabled,
 * it tries to copy the old content of the device table entries. If that
 * is not the case, or the copy fails, we just continue as a normal
 * kernel would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;


	if (!copy_device_table()) {
		/*
		 * If we come here because copying the device table from the
		 * old kernel failed with all IOMMUs enabled, print an error
		 * message and try to free the allocated old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");
		if (old_dev_tbl_cpy != NULL)
			free_pages((unsigned long)old_dev_tbl_cpy,
				   get_order(dev_table_size));

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");
		free_pages((unsigned long)amd_iommu_dev_table,
			   get_order(dev_table_size));
		amd_iommu_dev_table = old_dev_tbl_cpy;
		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_iommu_resources(void)
{
	kmemleak_free(irq_lookup_table);
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));
	irq_lookup_table = NULL;

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));
	amd_iommu_rlookup_table = NULL;

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));
	amd_iommu_alias_table = NULL;

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));
	amd_iommu_dev_table = NULL;

	free_iommu_all();
}
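
/*
 * Expansion of the IOAPIC_SB_DEVID define below, for reference: with
 * PCI_DEVFN()'s standard ((slot << 3) | func) encoding, bus 0x00,
 * slot 0x14, function 0 gives
 *
 *   (0x00 << 8) | PCI_DEVFN(0x14, 0) = (0x00 << 8) | 0xa0 = 0x00a0
 *
 * which is the devid the southbridge IOAPIC is expected to have in the
 * IVRS table.
 */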

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *	1st pass) Discover the most comprehensive IVHD type to use.
 *
 *	2nd pass) Find the highest PCI device id the driver has to
 *		handle. Based on this information the sizes of the data
 *		structures that need to be allocated are determined.
 *
 *	3rd pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	4th pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i, remap_cache_sz, ret = 0;
	u32 pci_id;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse the ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
						get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO,
				      get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * Let all alias entries point to themselves.
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * Never allocate domain 0 because it's used as the non-allocated
	 * and error value placeholder.
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan.
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable IOMMU if there's Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disable IOMMU on Stoney Ridge\n");
			amd_iommu_disabled = true;
			break;
		}
	}
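
	/*
	 * To illustrate the check above: dword 0 of PCI config space holds
	 * the vendor ID in the low 16 bits and the device ID in the high
	 * 16 bits, so a Stoney Ridge graphics function reads back as
	 * pci_id = 0x98e41002 (vendor 0x1002 = ATI/AMD, device 0x98e4).
	 */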

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping is enabled, so create a kmem_cache
		 * for the remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							IRQ_TABLE_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
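
/*
 * For orientation, a fully successful boot walks the states in order:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * while IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states, as handled in state_next() below.
 */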

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state = IOMMU_NOT_FOUND;
			ret = -ENODEV;
		} else {
			init_state = IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
			pr_info("AMD IOMMU disabled\n");
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}

static bool amd_iommu_sme_check(void)
{
	if (!sme_active() || (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}
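
/*
 * Worked examples for amd_iommu_sme_check() above (hypothetical patch
 * levels): on a Fam17h system with SME active, microcode 0x08001227
 * passes the first check (>= 0x08001205); 0x08001130 falls inside the
 * [0x08001126, 0x080011ff] window and also passes; 0x08001120 fails
 * both checks, so the IOMMU is reported as unsupported.
 */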

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It simply checks whether an IVRS ACPI table is present to
 * detect AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_ioapic_map_size++;
	early_ioapic_map[i].id = id;
	early_ioapic_map[i].devid = devid;
	early_ioapic_map[i].cmd_line = true;

	return 1;
}

static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
		       str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps = true;
	i = early_hpet_map_size++;
	early_hpet_map[i].id = id;
	early_hpet_map[i].devid = devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}
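
/*
 * Worked example for the devid encoding used by the two parsers above
 * (the command line itself is hypothetical): "ivrs_ioapic[10]=00:14.0"
 * parses to id = 10, bus = 0x00, dev = 0x14, fn = 0x0, so
 *
 *   devid = (0x00 << 8) | (0x14 << 3) | 0x0 = 0x00a0
 *
 * which matches the bus:dev.fn layout used throughout the IVRS code.
 */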

static int __init parse_ivrs_acpihid(char *str)
{
	u32 bus, dev, fn;
	char *hid, *uid, *p;
	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
	int ret, i;

	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
	if (ret != 4) {
		pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid =
		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}

__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);

struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
EXPORT_SYMBOL(get_amd_iommu);

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support. This code allows access to the
 * IOMMU Performance Counter (PC) functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
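
/*
 * Offset arithmetic in iommu_pc_get_set_reg() above, with hypothetical
 * indices bank = 0, cntr = 1, fxn = 0x10:
 *
 *   offset = ((0x40 | 0) << 12) | (1 << 8) | 0x10 = 0x40110
 *
 * The fxn argument must be a multiple of 8 no larger than 0x28, and the
 * resulting offset is additionally bounded by the per-IOMMU
 * max_banks/max_counters aperture computed in max_offset_lim.
 */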

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
EXPORT_SYMBOL(amd_iommu_pc_get_reg);

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
EXPORT_SYMBOL(amd_iommu_pc_set_reg);