1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 * Leo Duran <leo.duran@amd.com> 6 */ 7 8 #define pr_fmt(fmt) "AMD-Vi: " fmt 9 #define dev_fmt(fmt) pr_fmt(fmt) 10 11 #include <linux/pci.h> 12 #include <linux/acpi.h> 13 #include <linux/list.h> 14 #include <linux/bitmap.h> 15 #include <linux/slab.h> 16 #include <linux/syscore_ops.h> 17 #include <linux/interrupt.h> 18 #include <linux/msi.h> 19 #include <linux/irq.h> 20 #include <linux/amd-iommu.h> 21 #include <linux/export.h> 22 #include <linux/kmemleak.h> 23 #include <linux/cc_platform.h> 24 #include <linux/iopoll.h> 25 #include <asm/pci-direct.h> 26 #include <asm/iommu.h> 27 #include <asm/apic.h> 28 #include <asm/gart.h> 29 #include <asm/x86_init.h> 30 #include <asm/io_apic.h> 31 #include <asm/irq_remapping.h> 32 #include <asm/set_memory.h> 33 34 #include <linux/crash_dump.h> 35 36 #include "amd_iommu.h" 37 #include "../irq_remapping.h" 38 39 /* 40 * definitions for the ACPI scanning code 41 */ 42 #define IVRS_HEADER_LENGTH 48 43 44 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 45 #define ACPI_IVMD_TYPE_ALL 0x20 46 #define ACPI_IVMD_TYPE 0x21 47 #define ACPI_IVMD_TYPE_RANGE 0x22 48 49 #define IVHD_DEV_ALL 0x01 50 #define IVHD_DEV_SELECT 0x02 51 #define IVHD_DEV_SELECT_RANGE_START 0x03 52 #define IVHD_DEV_RANGE_END 0x04 53 #define IVHD_DEV_ALIAS 0x42 54 #define IVHD_DEV_ALIAS_RANGE 0x43 55 #define IVHD_DEV_EXT_SELECT 0x46 56 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 57 #define IVHD_DEV_SPECIAL 0x48 58 #define IVHD_DEV_ACPI_HID 0xf0 59 60 #define UID_NOT_PRESENT 0 61 #define UID_IS_INTEGER 1 62 #define UID_IS_CHARACTER 2 63 64 #define IVHD_SPECIAL_IOAPIC 1 65 #define IVHD_SPECIAL_HPET 2 66 67 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 68 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 69 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 70 #define IVHD_FLAG_ISOC_EN_MASK 0x08 71 72 #define IVMD_FLAG_EXCL_RANGE 0x08 73 #define IVMD_FLAG_IW 0x04 74 #define IVMD_FLAG_IR 0x02 75 #define IVMD_FLAG_UNITY_MAP 0x01 76 77 #define ACPI_DEVFLAG_INITPASS 0x01 78 #define ACPI_DEVFLAG_EXTINT 0x02 79 #define ACPI_DEVFLAG_NMI 0x04 80 #define ACPI_DEVFLAG_SYSMGT1 0x10 81 #define ACPI_DEVFLAG_SYSMGT2 0x20 82 #define ACPI_DEVFLAG_LINT0 0x40 83 #define ACPI_DEVFLAG_LINT1 0x80 84 #define ACPI_DEVFLAG_ATSDIS 0x10000000 85 86 #define LOOP_TIMEOUT 2000000 87 88 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ 89 | ((dev & 0x1f) << 3) | (fn & 0x7)) 90 91 /* 92 * ACPI table definitions 93 * 94 * These data structures are laid over the table to parse the important values 95 * out of it. 96 */ 97 98 /* 99 * structure describing one IOMMU in the ACPI table. Typically followed by one 100 * or more ivhd_entrys. 101 */ 102 struct ivhd_header { 103 u8 type; 104 u8 flags; 105 u16 length; 106 u16 devid; 107 u16 cap_ptr; 108 u64 mmio_phys; 109 u16 pci_seg; 110 u16 info; 111 u32 efr_attr; 112 113 /* Following only valid on IVHD type 11h and 40h */ 114 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ 115 u64 efr_reg2; 116 } __attribute__((packed)); 117 118 /* 119 * A device entry describing which devices a specific IOMMU translates and 120 * which requestor ids they use. 121 */ 122 struct ivhd_entry { 123 u8 type; 124 u16 devid; 125 u8 flags; 126 struct_group(ext_hid, 127 u32 ext; 128 u32 hidh; 129 ); 130 u64 cid; 131 u8 uidf; 132 u8 uidl; 133 u8 uid; 134 } __attribute__((packed)); 135 136 /* 137 * An AMD IOMMU memory definition structure. 
It defines things like exclusion 138 * ranges for devices and regions that should be unity mapped. 139 */ 140 struct ivmd_header { 141 u8 type; 142 u8 flags; 143 u16 length; 144 u16 devid; 145 u16 aux; 146 u16 pci_seg; 147 u8 resv[6]; 148 u64 range_start; 149 u64 range_length; 150 } __attribute__((packed)); 151 152 bool amd_iommu_dump; 153 bool amd_iommu_irq_remap __read_mostly; 154 155 enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1; 156 /* Guest page table level */ 157 int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL; 158 159 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 160 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; 161 162 static bool amd_iommu_detected; 163 static bool amd_iommu_disabled __initdata; 164 static bool amd_iommu_force_enable __initdata; 165 static bool amd_iommu_irtcachedis; 166 static int amd_iommu_target_ivhd_type; 167 168 /* Global EFR and EFR2 registers */ 169 u64 amd_iommu_efr; 170 u64 amd_iommu_efr2; 171 172 /* Is SNP enabled on the system? */ 173 bool amd_iommu_snp_en; 174 EXPORT_SYMBOL(amd_iommu_snp_en); 175 176 LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */ 177 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 178 system */ 179 180 /* Array to assign indices to IOMMUs */ 181 struct amd_iommu *amd_iommus[MAX_IOMMUS]; 182 183 /* Number of IOMMUs present in the system */ 184 static int amd_iommus_present; 185 186 /* Do the IOMMUs have a non-present cache? */ 187 bool amd_iommu_np_cache __read_mostly; 188 bool amd_iommu_iotlb_sup __read_mostly = true; 189 190 u32 amd_iommu_max_pasid __read_mostly = ~0; 191 192 bool amd_iommu_v2_present __read_mostly; 193 static bool amd_iommu_pc_present __read_mostly; 194 bool amdr_ivrs_remap_support __read_mostly; 195 196 bool amd_iommu_force_isolation __read_mostly; 197 198 /* 199 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap 200 * to know which ones are already in use.
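* Domain id 0 is never handed out from this bitmap; it is reserved as the non-allocated/error value.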
201 */ 202 unsigned long *amd_iommu_pd_alloc_bitmap; 203 204 enum iommu_init_state { 205 IOMMU_START_STATE, 206 IOMMU_IVRS_DETECTED, 207 IOMMU_ACPI_FINISHED, 208 IOMMU_ENABLED, 209 IOMMU_PCI_INIT, 210 IOMMU_INTERRUPTS_EN, 211 IOMMU_INITIALIZED, 212 IOMMU_NOT_FOUND, 213 IOMMU_INIT_ERROR, 214 IOMMU_CMDLINE_DISABLED, 215 }; 216 217 /* Early ioapic and hpet maps from kernel command line */ 218 #define EARLY_MAP_SIZE 4 219 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; 220 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; 221 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE]; 222 223 static int __initdata early_ioapic_map_size; 224 static int __initdata early_hpet_map_size; 225 static int __initdata early_acpihid_map_size; 226 227 static bool __initdata cmdline_maps; 228 229 static enum iommu_init_state init_state = IOMMU_START_STATE; 230 231 static int amd_iommu_enable_interrupts(void); 232 static int __init iommu_go_to_state(enum iommu_init_state state); 233 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg); 234 235 static bool amd_iommu_pre_enabled = true; 236 237 static u32 amd_iommu_ivinfo __initdata; 238 239 bool translation_pre_enabled(struct amd_iommu *iommu) 240 { 241 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); 242 } 243 244 static void clear_translation_pre_enabled(struct amd_iommu *iommu) 245 { 246 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 247 } 248 249 static void init_translation_status(struct amd_iommu *iommu) 250 { 251 u64 ctrl; 252 253 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 254 if (ctrl & (1<<CONTROL_IOMMU_EN)) 255 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 256 } 257 258 static inline unsigned long tbl_size(int entry_size, int last_bdf) 259 { 260 unsigned shift = PAGE_SHIFT + 261 get_order((last_bdf + 1) * entry_size); 262 263 return 1UL << shift; 264 } 265 266 int amd_iommu_get_num_iommus(void) 267 { 268 return amd_iommus_present; 269 } 270 271 /* 272 * Iterate through all the IOMMUs to get common EFR 273 * masks among all IOMMUs and warn if found inconsistency. 274 */ 275 static void get_global_efr(void) 276 { 277 struct amd_iommu *iommu; 278 279 for_each_iommu(iommu) { 280 u64 tmp = iommu->features; 281 u64 tmp2 = iommu->features2; 282 283 if (list_is_first(&iommu->list, &amd_iommu_list)) { 284 amd_iommu_efr = tmp; 285 amd_iommu_efr2 = tmp2; 286 continue; 287 } 288 289 if (amd_iommu_efr == tmp && 290 amd_iommu_efr2 == tmp2) 291 continue; 292 293 pr_err(FW_BUG 294 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", 295 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2, 296 iommu->index, iommu->pci_seg->id, 297 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), 298 PCI_FUNC(iommu->devid)); 299 300 amd_iommu_efr &= tmp; 301 amd_iommu_efr2 &= tmp2; 302 } 303 304 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2); 305 } 306 307 static bool check_feature_on_all_iommus(u64 mask) 308 { 309 return !!(amd_iommu_efr & mask); 310 } 311 312 static inline int check_feature_gpt_level(void) 313 { 314 return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK); 315 } 316 317 /* 318 * For IVHD type 0x11/0x40, EFR is also available via IVHD. 319 * Default to IVHD EFR since it is available sooner 320 * (i.e. before PCI init). 
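* The EFR values read from MMIO during PCI init are cross-checked against these in late_iommu_features_init().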
321 */ 322 static void __init early_iommu_features_init(struct amd_iommu *iommu, 323 struct ivhd_header *h) 324 { 325 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) { 326 iommu->features = h->efr_reg; 327 iommu->features2 = h->efr_reg2; 328 } 329 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP) 330 amdr_ivrs_remap_support = true; 331 } 332 333 /* Access to l1 and l2 indexed register spaces */ 334 335 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) 336 { 337 u32 val; 338 339 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 340 pci_read_config_dword(iommu->dev, 0xfc, &val); 341 return val; 342 } 343 344 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) 345 { 346 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); 347 pci_write_config_dword(iommu->dev, 0xfc, val); 348 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 349 } 350 351 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) 352 { 353 u32 val; 354 355 pci_write_config_dword(iommu->dev, 0xf0, address); 356 pci_read_config_dword(iommu->dev, 0xf4, &val); 357 return val; 358 } 359 360 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) 361 { 362 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); 363 pci_write_config_dword(iommu->dev, 0xf4, val); 364 } 365 366 /**************************************************************************** 367 * 368 * AMD IOMMU MMIO register space handling functions 369 * 370 * These functions are used to program the IOMMU device registers in 371 * MMIO space required for that driver. 372 * 373 ****************************************************************************/ 374 375 /* 376 * This function set the exclusion range in the IOMMU. DMA accesses to the 377 * exclusion range are passed through untranslated 378 */ 379 static void iommu_set_exclusion_range(struct amd_iommu *iommu) 380 { 381 u64 start = iommu->exclusion_start & PAGE_MASK; 382 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; 383 u64 entry; 384 385 if (!iommu->exclusion_start) 386 return; 387 388 entry = start | MMIO_EXCL_ENABLE_MASK; 389 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 390 &entry, sizeof(entry)); 391 392 entry = limit; 393 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 394 &entry, sizeof(entry)); 395 } 396 397 static void iommu_set_cwwb_range(struct amd_iommu *iommu) 398 { 399 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); 400 u64 entry = start & PM_ADDR_MASK; 401 402 if (!check_feature_on_all_iommus(FEATURE_SNP)) 403 return; 404 405 /* Note: 406 * Re-purpose Exclusion base/limit registers for Completion wait 407 * write-back base/limit. 408 */ 409 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 410 &entry, sizeof(entry)); 411 412 /* Note: 413 * Default to 4 Kbytes, which can be specified by setting base 414 * address equal to the limit address. 
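* The cmd_sem buffer programmed here is a single 4K page allocated via alloc_cwwb_sem().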
415 */ 416 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 417 &entry, sizeof(entry)); 418 } 419 420 /* Programs the physical address of the device table into the IOMMU hardware */ 421 static void iommu_set_device_table(struct amd_iommu *iommu) 422 { 423 u64 entry; 424 u32 dev_table_size = iommu->pci_seg->dev_table_size; 425 void *dev_table = (void *)get_dev_table(iommu); 426 427 BUG_ON(iommu->mmio_base == NULL); 428 429 entry = iommu_virt_to_phys(dev_table); 430 entry |= (dev_table_size >> 12) - 1; 431 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, 432 &entry, sizeof(entry)); 433 } 434 435 /* Generic functions to enable/disable certain features of the IOMMU. */ 436 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) 437 { 438 u64 ctrl; 439 440 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 441 ctrl |= (1ULL << bit); 442 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 443 } 444 445 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) 446 { 447 u64 ctrl; 448 449 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 450 ctrl &= ~(1ULL << bit); 451 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 452 } 453 454 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) 455 { 456 u64 ctrl; 457 458 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 459 ctrl &= ~CTRL_INV_TO_MASK; 460 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; 461 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 462 } 463 464 /* Function to enable the hardware */ 465 static void iommu_enable(struct amd_iommu *iommu) 466 { 467 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 468 } 469 470 static void iommu_disable(struct amd_iommu *iommu) 471 { 472 if (!iommu->mmio_base) 473 return; 474 475 /* Disable command buffer */ 476 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 477 478 /* Disable event logging and event interrupts */ 479 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); 480 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 481 482 /* Disable IOMMU GA_LOG */ 483 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 484 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 485 486 /* Disable IOMMU hardware itself */ 487 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 488 489 /* Clear IRTE cache disabling bit */ 490 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); 491 } 492 493 /* 494 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 495 * the system has one. 496 */ 497 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) 498 { 499 if (!request_mem_region(address, end, "amd_iommu")) { 500 pr_err("Can not reserve memory region %llx-%llx for mmio\n", 501 address, end); 502 pr_err("This is a BIOS bug. Please contact your hardware vendor\n"); 503 return NULL; 504 } 505 506 return (u8 __iomem *)ioremap(address, end); 507 } 508 509 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) 510 { 511 if (iommu->mmio_base) 512 iounmap(iommu->mmio_base); 513 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); 514 } 515 516 static inline u32 get_ivhd_header_size(struct ivhd_header *h) 517 { 518 u32 size = 0; 519 520 switch (h->type) { 521 case 0x10: 522 size = 24; 523 break; 524 case 0x11: 525 case 0x40: 526 size = 40; 527 break; 528 } 529 return size; 530 } 531 532 /**************************************************************************** 533 * 534 * The functions below belong to the first pass of AMD IOMMU ACPI table 535 * parsing. 
In this pass we try to find out the highest device id this 536 * code has to handle. Upon this information the size of the shared data 537 * structures is determined later. 538 * 539 ****************************************************************************/ 540 541 /* 542 * This function calculates the length of a given IVHD entry 543 */ 544 static inline int ivhd_entry_length(u8 *ivhd) 545 { 546 u32 type = ((struct ivhd_entry *)ivhd)->type; 547 548 if (type < 0x80) { 549 return 0x04 << (*ivhd >> 6); 550 } else if (type == IVHD_DEV_ACPI_HID) { 551 /* For ACPI_HID, offset 21 is uid len */ 552 return *((u8 *)ivhd + 21) + 22; 553 } 554 return 0; 555 } 556 557 /* 558 * After reading the highest device id from the IOMMU PCI capability header 559 * this function looks if there is a higher device id defined in the ACPI table 560 */ 561 static int __init find_last_devid_from_ivhd(struct ivhd_header *h) 562 { 563 u8 *p = (void *)h, *end = (void *)h; 564 struct ivhd_entry *dev; 565 int last_devid = -EINVAL; 566 567 u32 ivhd_size = get_ivhd_header_size(h); 568 569 if (!ivhd_size) { 570 pr_err("Unsupported IVHD type %#x\n", h->type); 571 return -EINVAL; 572 } 573 574 p += ivhd_size; 575 end += h->length; 576 577 while (p < end) { 578 dev = (struct ivhd_entry *)p; 579 switch (dev->type) { 580 case IVHD_DEV_ALL: 581 /* Use maximum BDF value for DEV_ALL */ 582 return 0xffff; 583 case IVHD_DEV_SELECT: 584 case IVHD_DEV_RANGE_END: 585 case IVHD_DEV_ALIAS: 586 case IVHD_DEV_EXT_SELECT: 587 /* all the above subfield types refer to device ids */ 588 if (dev->devid > last_devid) 589 last_devid = dev->devid; 590 break; 591 default: 592 break; 593 } 594 p += ivhd_entry_length(p); 595 } 596 597 WARN_ON(p != end); 598 599 return last_devid; 600 } 601 602 static int __init check_ivrs_checksum(struct acpi_table_header *table) 603 { 604 int i; 605 u8 checksum = 0, *p = (u8 *)table; 606 607 for (i = 0; i < table->length; ++i) 608 checksum += p[i]; 609 if (checksum != 0) { 610 /* ACPI table corrupt */ 611 pr_err(FW_BUG "IVRS invalid checksum\n"); 612 return -ENODEV; 613 } 614 615 return 0; 616 } 617 618 /* 619 * Iterate over all IVHD entries in the ACPI table and find the highest device 620 * id which we need to handle. This is the first of three functions which parse 621 * the ACPI table. So we check the checksum here. 622 */ 623 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) 624 { 625 u8 *p = (u8 *)table, *end = (u8 *)table; 626 struct ivhd_header *h; 627 int last_devid, last_bdf = 0; 628 629 p += IVRS_HEADER_LENGTH; 630 631 end += table->length; 632 while (p < end) { 633 h = (struct ivhd_header *)p; 634 if (h->pci_seg == pci_seg && 635 h->type == amd_iommu_target_ivhd_type) { 636 last_devid = find_last_devid_from_ivhd(h); 637 638 if (last_devid < 0) 639 return -EINVAL; 640 if (last_devid > last_bdf) 641 last_bdf = last_devid; 642 } 643 p += h->length; 644 } 645 WARN_ON(p != end); 646 647 return last_bdf; 648 } 649 650 /**************************************************************************** 651 * 652 * The following functions belong to the code path which parses the ACPI table 653 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 654 * data structures, initialize the per PCI segment device/alias/rlookup table 655 * and also basically initialize the hardware. 
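* All per-segment tables below are sized from the highest bus/dev/fn (last_bdf) found during the first pass.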
656 * 657 ****************************************************************************/ 658 659 /* Allocate per PCI segment device table */ 660 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) 661 { 662 pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, 663 get_order(pci_seg->dev_table_size)); 664 if (!pci_seg->dev_table) 665 return -ENOMEM; 666 667 return 0; 668 } 669 670 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) 671 { 672 free_pages((unsigned long)pci_seg->dev_table, 673 get_order(pci_seg->dev_table_size)); 674 pci_seg->dev_table = NULL; 675 } 676 677 /* Allocate per PCI segment IOMMU rlookup table. */ 678 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 679 { 680 pci_seg->rlookup_table = (void *)__get_free_pages( 681 GFP_KERNEL | __GFP_ZERO, 682 get_order(pci_seg->rlookup_table_size)); 683 if (pci_seg->rlookup_table == NULL) 684 return -ENOMEM; 685 686 return 0; 687 } 688 689 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 690 { 691 free_pages((unsigned long)pci_seg->rlookup_table, 692 get_order(pci_seg->rlookup_table_size)); 693 pci_seg->rlookup_table = NULL; 694 } 695 696 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 697 { 698 pci_seg->irq_lookup_table = (void *)__get_free_pages( 699 GFP_KERNEL | __GFP_ZERO, 700 get_order(pci_seg->rlookup_table_size)); 701 kmemleak_alloc(pci_seg->irq_lookup_table, 702 pci_seg->rlookup_table_size, 1, GFP_KERNEL); 703 if (pci_seg->irq_lookup_table == NULL) 704 return -ENOMEM; 705 706 return 0; 707 } 708 709 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 710 { 711 kmemleak_free(pci_seg->irq_lookup_table); 712 free_pages((unsigned long)pci_seg->irq_lookup_table, 713 get_order(pci_seg->rlookup_table_size)); 714 pci_seg->irq_lookup_table = NULL; 715 } 716 717 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) 718 { 719 int i; 720 721 pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL, 722 get_order(pci_seg->alias_table_size)); 723 if (!pci_seg->alias_table) 724 return -ENOMEM; 725 726 /* 727 * let all alias entries point to itself 728 */ 729 for (i = 0; i <= pci_seg->last_bdf; ++i) 730 pci_seg->alias_table[i] = i; 731 732 return 0; 733 } 734 735 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) 736 { 737 free_pages((unsigned long)pci_seg->alias_table, 738 get_order(pci_seg->alias_table_size)); 739 pci_seg->alias_table = NULL; 740 } 741 742 /* 743 * Allocates the command buffer. This buffer is per AMD IOMMU. We can 744 * write commands to that buffer later and the IOMMU will execute them 745 * asynchronously 746 */ 747 static int __init alloc_command_buffer(struct amd_iommu *iommu) 748 { 749 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 750 get_order(CMD_BUFFER_SIZE)); 751 752 return iommu->cmd_buf ? 0 : -ENOMEM; 753 } 754 755 /* 756 * Interrupt handler has processed all pending events and adjusted head 757 * and tail pointer. Reset overflow mask and restart logging again. 
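* The helper below first disables the log and its interrupt, clears the overflow bit in the status register, and then re-enables both.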
758 */ 759 static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, 760 u8 cntrl_intr, u8 cntrl_log, 761 u32 status_run_mask, u32 status_overflow_mask) 762 { 763 u32 status; 764 765 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 766 if (status & status_run_mask) 767 return; 768 769 pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); 770 771 iommu_feature_disable(iommu, cntrl_log); 772 iommu_feature_disable(iommu, cntrl_intr); 773 774 writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); 775 776 iommu_feature_enable(iommu, cntrl_intr); 777 iommu_feature_enable(iommu, cntrl_log); 778 } 779 780 /* 781 * This function restarts event logging in case the IOMMU experienced 782 * an event log buffer overflow. 783 */ 784 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) 785 { 786 amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, 787 CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK, 788 MMIO_STATUS_EVT_OVERFLOW_MASK); 789 } 790 791 /* 792 * This function restarts guest vAPIC (GA) logging in case the IOMMU 793 * experienced a GA log overflow. 794 */ 795 void amd_iommu_restart_ga_log(struct amd_iommu *iommu) 796 { 797 amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, 798 CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK, 799 MMIO_STATUS_GALOG_OVERFLOW_MASK); 800 } 801 802 /* 803 * This function restarts PPR logging in case the IOMMU experienced a 804 * PPR log overflow. 805 */ 806 void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) 807 { 808 amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN, 809 CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, 810 MMIO_STATUS_PPR_OVERFLOW_MASK); 811 } 812 813 /* 814 * This function resets the command buffer if the IOMMU stopped fetching 815 * commands from it. 816 */ 817 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) 818 { 819 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 820 821 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); 822 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 823 iommu->cmd_buf_head = 0; 824 iommu->cmd_buf_tail = 0; 825 826 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); 827 } 828 829 /* 830 * This function writes the command buffer address to the hardware and 831 * enables it.
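* The length encoding (MMIO_CMD_SIZE_512, i.e. 512 command entries) is ORed into the same register as the base address.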
832 */ 833 static void iommu_enable_command_buffer(struct amd_iommu *iommu) 834 { 835 u64 entry; 836 837 BUG_ON(iommu->cmd_buf == NULL); 838 839 entry = iommu_virt_to_phys(iommu->cmd_buf); 840 entry |= MMIO_CMD_SIZE_512; 841 842 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, 843 &entry, sizeof(entry)); 844 845 amd_iommu_reset_cmd_buffer(iommu); 846 } 847 848 /* 849 * This function disables the command buffer 850 */ 851 static void iommu_disable_command_buffer(struct amd_iommu *iommu) 852 { 853 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 854 } 855 856 static void __init free_command_buffer(struct amd_iommu *iommu) 857 { 858 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); 859 } 860 861 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, 862 gfp_t gfp, size_t size) 863 { 864 int order = get_order(size); 865 void *buf = (void *)__get_free_pages(gfp, order); 866 867 if (buf && 868 check_feature_on_all_iommus(FEATURE_SNP) && 869 set_memory_4k((unsigned long)buf, (1 << order))) { 870 free_pages((unsigned long)buf, order); 871 buf = NULL; 872 } 873 874 return buf; 875 } 876 877 /* allocates the memory where the IOMMU will log its events to */ 878 static int __init alloc_event_buffer(struct amd_iommu *iommu) 879 { 880 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 881 EVT_BUFFER_SIZE); 882 883 return iommu->evt_buf ? 0 : -ENOMEM; 884 } 885 886 static void iommu_enable_event_buffer(struct amd_iommu *iommu) 887 { 888 u64 entry; 889 890 BUG_ON(iommu->evt_buf == NULL); 891 892 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; 893 894 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, 895 &entry, sizeof(entry)); 896 897 /* set head and tail to zero manually */ 898 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); 899 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); 900 901 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); 902 } 903 904 /* 905 * This function disables the event log buffer 906 */ 907 static void iommu_disable_event_buffer(struct amd_iommu *iommu) 908 { 909 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 910 } 911 912 static void __init free_event_buffer(struct amd_iommu *iommu) 913 { 914 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); 915 } 916 917 /* allocates the memory where the IOMMU will log its events to */ 918 static int __init alloc_ppr_log(struct amd_iommu *iommu) 919 { 920 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 921 PPR_LOG_SIZE); 922 923 return iommu->ppr_log ? 
0 : -ENOMEM; 924 } 925 926 static void iommu_enable_ppr_log(struct amd_iommu *iommu) 927 { 928 u64 entry; 929 930 if (iommu->ppr_log == NULL) 931 return; 932 933 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; 934 935 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, 936 &entry, sizeof(entry)); 937 938 /* set head and tail to zero manually */ 939 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 940 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 941 942 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); 943 iommu_feature_enable(iommu, CONTROL_PPR_EN); 944 } 945 946 static void __init free_ppr_log(struct amd_iommu *iommu) 947 { 948 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); 949 } 950 951 static void free_ga_log(struct amd_iommu *iommu) 952 { 953 #ifdef CONFIG_IRQ_REMAP 954 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); 955 free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); 956 #endif 957 } 958 959 #ifdef CONFIG_IRQ_REMAP 960 static int iommu_ga_log_enable(struct amd_iommu *iommu) 961 { 962 u32 status, i; 963 u64 entry; 964 965 if (!iommu->ga_log) 966 return -EINVAL; 967 968 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 969 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 970 &entry, sizeof(entry)); 971 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & 972 (BIT_ULL(52)-1)) & ~7ULL; 973 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 974 &entry, sizeof(entry)); 975 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 976 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); 977 978 979 iommu_feature_enable(iommu, CONTROL_GAINT_EN); 980 iommu_feature_enable(iommu, CONTROL_GALOG_EN); 981 982 for (i = 0; i < LOOP_TIMEOUT; ++i) { 983 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 984 if (status & (MMIO_STATUS_GALOG_RUN_MASK)) 985 break; 986 udelay(10); 987 } 988 989 if (WARN_ON(i >= LOOP_TIMEOUT)) 990 return -EINVAL; 991 992 return 0; 993 } 994 995 static int iommu_init_ga_log(struct amd_iommu *iommu) 996 { 997 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 998 return 0; 999 1000 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1001 get_order(GA_LOG_SIZE)); 1002 if (!iommu->ga_log) 1003 goto err_out; 1004 1005 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1006 get_order(8)); 1007 if (!iommu->ga_log_tail) 1008 goto err_out; 1009 1010 return 0; 1011 err_out: 1012 free_ga_log(iommu); 1013 return -EINVAL; 1014 } 1015 #endif /* CONFIG_IRQ_REMAP */ 1016 1017 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) 1018 { 1019 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1); 1020 1021 return iommu->cmd_sem ? 0 : -ENOMEM; 1022 } 1023 1024 static void __init free_cwwb_sem(struct amd_iommu *iommu) 1025 { 1026 if (iommu->cmd_sem) 1027 free_page((unsigned long)iommu->cmd_sem); 1028 } 1029 1030 static void iommu_enable_xt(struct amd_iommu *iommu) 1031 { 1032 #ifdef CONFIG_IRQ_REMAP 1033 /* 1034 * XT mode (32-bit APIC destination ID) requires 1035 * GA mode (128-bit IRTE support) as a prerequisite. 
1036 */ 1037 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && 1038 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 1039 iommu_feature_enable(iommu, CONTROL_XT_EN); 1040 #endif /* CONFIG_IRQ_REMAP */ 1041 } 1042 1043 static void iommu_enable_gt(struct amd_iommu *iommu) 1044 { 1045 if (!iommu_feature(iommu, FEATURE_GT)) 1046 return; 1047 1048 iommu_feature_enable(iommu, CONTROL_GT_EN); 1049 } 1050 1051 /* sets a specific bit in the device table entry. */ 1052 static void __set_dev_entry_bit(struct dev_table_entry *dev_table, 1053 u16 devid, u8 bit) 1054 { 1055 int i = (bit >> 6) & 0x03; 1056 int _bit = bit & 0x3f; 1057 1058 dev_table[devid].data[i] |= (1UL << _bit); 1059 } 1060 1061 static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) 1062 { 1063 struct dev_table_entry *dev_table = get_dev_table(iommu); 1064 1065 return __set_dev_entry_bit(dev_table, devid, bit); 1066 } 1067 1068 static int __get_dev_entry_bit(struct dev_table_entry *dev_table, 1069 u16 devid, u8 bit) 1070 { 1071 int i = (bit >> 6) & 0x03; 1072 int _bit = bit & 0x3f; 1073 1074 return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit; 1075 } 1076 1077 static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) 1078 { 1079 struct dev_table_entry *dev_table = get_dev_table(iommu); 1080 1081 return __get_dev_entry_bit(dev_table, devid, bit); 1082 } 1083 1084 static bool __copy_device_table(struct amd_iommu *iommu) 1085 { 1086 u64 int_ctl, int_tab_len, entry = 0; 1087 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1088 struct dev_table_entry *old_devtb = NULL; 1089 u32 lo, hi, devid, old_devtb_size; 1090 phys_addr_t old_devtb_phys; 1091 u16 dom_id, dte_v, irq_v; 1092 gfp_t gfp_flag; 1093 u64 tmp; 1094 1095 /* Each IOMMU use separate device table with the same size */ 1096 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); 1097 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); 1098 entry = (((u64) hi) << 32) + lo; 1099 1100 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; 1101 if (old_devtb_size != pci_seg->dev_table_size) { 1102 pr_err("The device table size of IOMMU:%d is not expected!\n", 1103 iommu->index); 1104 return false; 1105 } 1106 1107 /* 1108 * When SME is enabled in the first kernel, the entry includes the 1109 * memory encryption mask(sme_me_mask), we must remove the memory 1110 * encryption mask to obtain the true physical address in kdump kernel. 1111 */ 1112 old_devtb_phys = __sme_clr(entry) & PAGE_MASK; 1113 1114 if (old_devtb_phys >= 0x100000000ULL) { 1115 pr_err("The address of old device table is above 4G, not trustworthy!\n"); 1116 return false; 1117 } 1118 old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel()) 1119 ? 
(__force void *)ioremap_encrypted(old_devtb_phys, 1120 pci_seg->dev_table_size) 1121 : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB); 1122 1123 if (!old_devtb) 1124 return false; 1125 1126 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32; 1127 pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, 1128 get_order(pci_seg->dev_table_size)); 1129 if (pci_seg->old_dev_tbl_cpy == NULL) { 1130 pr_err("Failed to allocate memory for copying old device table!\n"); 1131 memunmap(old_devtb); 1132 return false; 1133 } 1134 1135 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 1136 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid]; 1137 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; 1138 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; 1139 1140 if (dte_v && dom_id) { 1141 pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; 1142 pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; 1143 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); 1144 /* If gcr3 table existed, mask it out */ 1145 if (old_devtb[devid].data[0] & DTE_FLAG_GV) { 1146 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; 1147 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; 1148 pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp; 1149 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A; 1150 tmp |= DTE_FLAG_GV; 1151 pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp; 1152 } 1153 } 1154 1155 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; 1156 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; 1157 int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK; 1158 if (irq_v && (int_ctl || int_tab_len)) { 1159 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || 1160 (int_tab_len != DTE_INTTABLEN)) { 1161 pr_err("Wrong old irq remapping flag: %#x\n", devid); 1162 memunmap(old_devtb); 1163 return false; 1164 } 1165 1166 pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; 1167 } 1168 } 1169 memunmap(old_devtb); 1170 1171 return true; 1172 } 1173 1174 static bool copy_device_table(void) 1175 { 1176 struct amd_iommu *iommu; 1177 struct amd_iommu_pci_seg *pci_seg; 1178 1179 if (!amd_iommu_pre_enabled) 1180 return false; 1181 1182 pr_warn("Translation is already enabled - trying to copy translation structures\n"); 1183 1184 /* 1185 * All IOMMUs within PCI segment shares common device table. 1186 * Hence copy device table only once per PCI segment. 
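* The copy is kept in pci_seg->old_dev_tbl_cpy and replaces the freshly allocated device table when the IOMMUs are enabled.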
1187 */ 1188 for_each_pci_segment(pci_seg) { 1189 for_each_iommu(iommu) { 1190 if (pci_seg->id != iommu->pci_seg->id) 1191 continue; 1192 if (!__copy_device_table(iommu)) 1193 return false; 1194 break; 1195 } 1196 } 1197 1198 return true; 1199 } 1200 1201 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid) 1202 { 1203 int sysmgt; 1204 1205 sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) | 1206 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1); 1207 1208 if (sysmgt == 0x01) 1209 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW); 1210 } 1211 1212 /* 1213 * This function takes the device specific flags read from the ACPI 1214 * table and sets up the device table entry with that information 1215 */ 1216 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, 1217 u16 devid, u32 flags, u32 ext_flags) 1218 { 1219 if (flags & ACPI_DEVFLAG_INITPASS) 1220 set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS); 1221 if (flags & ACPI_DEVFLAG_EXTINT) 1222 set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS); 1223 if (flags & ACPI_DEVFLAG_NMI) 1224 set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS); 1225 if (flags & ACPI_DEVFLAG_SYSMGT1) 1226 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1); 1227 if (flags & ACPI_DEVFLAG_SYSMGT2) 1228 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2); 1229 if (flags & ACPI_DEVFLAG_LINT0) 1230 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS); 1231 if (flags & ACPI_DEVFLAG_LINT1) 1232 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS); 1233 1234 amd_iommu_apply_erratum_63(iommu, devid); 1235 1236 amd_iommu_set_rlookup_table(iommu, devid); 1237 } 1238 1239 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line) 1240 { 1241 struct devid_map *entry; 1242 struct list_head *list; 1243 1244 if (type == IVHD_SPECIAL_IOAPIC) 1245 list = &ioapic_map; 1246 else if (type == IVHD_SPECIAL_HPET) 1247 list = &hpet_map; 1248 else 1249 return -EINVAL; 1250 1251 list_for_each_entry(entry, list, list) { 1252 if (!(entry->id == id && entry->cmd_line)) 1253 continue; 1254 1255 pr_info("Command-line override present for %s id %d - ignoring\n", 1256 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); 1257 1258 *devid = entry->devid; 1259 1260 return 0; 1261 } 1262 1263 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1264 if (!entry) 1265 return -ENOMEM; 1266 1267 entry->id = id; 1268 entry->devid = *devid; 1269 entry->cmd_line = cmd_line; 1270 1271 list_add_tail(&entry->list, list); 1272 1273 return 0; 1274 } 1275 1276 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid, 1277 bool cmd_line) 1278 { 1279 struct acpihid_map_entry *entry; 1280 struct list_head *list = &acpihid_map; 1281 1282 list_for_each_entry(entry, list, list) { 1283 if (strcmp(entry->hid, hid) || 1284 (*uid && *entry->uid && strcmp(entry->uid, uid)) || 1285 !entry->cmd_line) 1286 continue; 1287 1288 pr_info("Command-line override for hid:%s uid:%s\n", 1289 hid, uid); 1290 *devid = entry->devid; 1291 return 0; 1292 } 1293 1294 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1295 if (!entry) 1296 return -ENOMEM; 1297 1298 memcpy(entry->uid, uid, strlen(uid)); 1299 memcpy(entry->hid, hid, strlen(hid)); 1300 entry->devid = *devid; 1301 entry->cmd_line = cmd_line; 1302 entry->root_devid = (entry->devid & (~0x7)); 1303 1304 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n", 1305 entry->cmd_line ? 
"cmd" : "ivrs", 1306 entry->hid, entry->uid, entry->root_devid); 1307 1308 list_add_tail(&entry->list, list); 1309 return 0; 1310 } 1311 1312 static int __init add_early_maps(void) 1313 { 1314 int i, ret; 1315 1316 for (i = 0; i < early_ioapic_map_size; ++i) { 1317 ret = add_special_device(IVHD_SPECIAL_IOAPIC, 1318 early_ioapic_map[i].id, 1319 &early_ioapic_map[i].devid, 1320 early_ioapic_map[i].cmd_line); 1321 if (ret) 1322 return ret; 1323 } 1324 1325 for (i = 0; i < early_hpet_map_size; ++i) { 1326 ret = add_special_device(IVHD_SPECIAL_HPET, 1327 early_hpet_map[i].id, 1328 &early_hpet_map[i].devid, 1329 early_hpet_map[i].cmd_line); 1330 if (ret) 1331 return ret; 1332 } 1333 1334 for (i = 0; i < early_acpihid_map_size; ++i) { 1335 ret = add_acpi_hid_device(early_acpihid_map[i].hid, 1336 early_acpihid_map[i].uid, 1337 &early_acpihid_map[i].devid, 1338 early_acpihid_map[i].cmd_line); 1339 if (ret) 1340 return ret; 1341 } 1342 1343 return 0; 1344 } 1345 1346 /* 1347 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 1348 * initializes the hardware and our data structures with it. 1349 */ 1350 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, 1351 struct ivhd_header *h) 1352 { 1353 u8 *p = (u8 *)h; 1354 u8 *end = p, flags = 0; 1355 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id; 1356 u32 dev_i, ext_flags = 0; 1357 bool alias = false; 1358 struct ivhd_entry *e; 1359 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1360 u32 ivhd_size; 1361 int ret; 1362 1363 1364 ret = add_early_maps(); 1365 if (ret) 1366 return ret; 1367 1368 amd_iommu_apply_ivrs_quirks(); 1369 1370 /* 1371 * First save the recommended feature enable bits from ACPI 1372 */ 1373 iommu->acpi_flags = h->flags; 1374 1375 /* 1376 * Done. Now parse the device entries 1377 */ 1378 ivhd_size = get_ivhd_header_size(h); 1379 if (!ivhd_size) { 1380 pr_err("Unsupported IVHD type %#x\n", h->type); 1381 return -EINVAL; 1382 } 1383 1384 p += ivhd_size; 1385 1386 end += h->length; 1387 1388 1389 while (p < end) { 1390 e = (struct ivhd_entry *)p; 1391 seg_id = pci_seg->id; 1392 1393 switch (e->type) { 1394 case IVHD_DEV_ALL: 1395 1396 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); 1397 1398 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) 1399 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); 1400 break; 1401 case IVHD_DEV_SELECT: 1402 1403 DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x " 1404 "flags: %02x\n", 1405 seg_id, PCI_BUS_NUM(e->devid), 1406 PCI_SLOT(e->devid), 1407 PCI_FUNC(e->devid), 1408 e->flags); 1409 1410 devid = e->devid; 1411 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1412 break; 1413 case IVHD_DEV_SELECT_RANGE_START: 1414 1415 DUMP_printk(" DEV_SELECT_RANGE_START\t " 1416 "devid: %04x:%02x:%02x.%x flags: %02x\n", 1417 seg_id, PCI_BUS_NUM(e->devid), 1418 PCI_SLOT(e->devid), 1419 PCI_FUNC(e->devid), 1420 e->flags); 1421 1422 devid_start = e->devid; 1423 flags = e->flags; 1424 ext_flags = 0; 1425 alias = false; 1426 break; 1427 case IVHD_DEV_ALIAS: 1428 1429 DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x " 1430 "flags: %02x devid_to: %02x:%02x.%x\n", 1431 seg_id, PCI_BUS_NUM(e->devid), 1432 PCI_SLOT(e->devid), 1433 PCI_FUNC(e->devid), 1434 e->flags, 1435 PCI_BUS_NUM(e->ext >> 8), 1436 PCI_SLOT(e->ext >> 8), 1437 PCI_FUNC(e->ext >> 8)); 1438 1439 devid = e->devid; 1440 devid_to = e->ext >> 8; 1441 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); 1442 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); 1443 pci_seg->alias_table[devid] = devid_to; 1444 
break; 1445 case IVHD_DEV_ALIAS_RANGE: 1446 1447 DUMP_printk(" DEV_ALIAS_RANGE\t\t " 1448 "devid: %04x:%02x:%02x.%x flags: %02x " 1449 "devid_to: %04x:%02x:%02x.%x\n", 1450 seg_id, PCI_BUS_NUM(e->devid), 1451 PCI_SLOT(e->devid), 1452 PCI_FUNC(e->devid), 1453 e->flags, 1454 seg_id, PCI_BUS_NUM(e->ext >> 8), 1455 PCI_SLOT(e->ext >> 8), 1456 PCI_FUNC(e->ext >> 8)); 1457 1458 devid_start = e->devid; 1459 flags = e->flags; 1460 devid_to = e->ext >> 8; 1461 ext_flags = 0; 1462 alias = true; 1463 break; 1464 case IVHD_DEV_EXT_SELECT: 1465 1466 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x " 1467 "flags: %02x ext: %08x\n", 1468 seg_id, PCI_BUS_NUM(e->devid), 1469 PCI_SLOT(e->devid), 1470 PCI_FUNC(e->devid), 1471 e->flags, e->ext); 1472 1473 devid = e->devid; 1474 set_dev_entry_from_acpi(iommu, devid, e->flags, 1475 e->ext); 1476 break; 1477 case IVHD_DEV_EXT_SELECT_RANGE: 1478 1479 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " 1480 "%04x:%02x:%02x.%x flags: %02x ext: %08x\n", 1481 seg_id, PCI_BUS_NUM(e->devid), 1482 PCI_SLOT(e->devid), 1483 PCI_FUNC(e->devid), 1484 e->flags, e->ext); 1485 1486 devid_start = e->devid; 1487 flags = e->flags; 1488 ext_flags = e->ext; 1489 alias = false; 1490 break; 1491 case IVHD_DEV_RANGE_END: 1492 1493 DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n", 1494 seg_id, PCI_BUS_NUM(e->devid), 1495 PCI_SLOT(e->devid), 1496 PCI_FUNC(e->devid)); 1497 1498 devid = e->devid; 1499 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 1500 if (alias) { 1501 pci_seg->alias_table[dev_i] = devid_to; 1502 set_dev_entry_from_acpi(iommu, 1503 devid_to, flags, ext_flags); 1504 } 1505 set_dev_entry_from_acpi(iommu, dev_i, 1506 flags, ext_flags); 1507 } 1508 break; 1509 case IVHD_DEV_SPECIAL: { 1510 u8 handle, type; 1511 const char *var; 1512 u32 devid; 1513 int ret; 1514 1515 handle = e->ext & 0xff; 1516 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); 1517 type = (e->ext >> 24) & 0xff; 1518 1519 if (type == IVHD_SPECIAL_IOAPIC) 1520 var = "IOAPIC"; 1521 else if (type == IVHD_SPECIAL_HPET) 1522 var = "HPET"; 1523 else 1524 var = "UNKNOWN"; 1525 1526 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n", 1527 var, (int)handle, 1528 seg_id, PCI_BUS_NUM(devid), 1529 PCI_SLOT(devid), 1530 PCI_FUNC(devid)); 1531 1532 ret = add_special_device(type, handle, &devid, false); 1533 if (ret) 1534 return ret; 1535 1536 /* 1537 * add_special_device might update the devid in case a 1538 * command-line override is present. So call 1539 * set_dev_entry_from_acpi after add_special_device. 
1540 */ 1541 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1542 1543 break; 1544 } 1545 case IVHD_DEV_ACPI_HID: { 1546 u32 devid; 1547 u8 hid[ACPIHID_HID_LEN]; 1548 u8 uid[ACPIHID_UID_LEN]; 1549 int ret; 1550 1551 if (h->type != 0x40) { 1552 pr_err(FW_BUG "Invalid IVHD device type %#x\n", 1553 e->type); 1554 break; 1555 } 1556 1557 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); 1558 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); 1559 hid[ACPIHID_HID_LEN - 1] = '\0'; 1560 1561 if (!(*hid)) { 1562 pr_err(FW_BUG "Invalid HID.\n"); 1563 break; 1564 } 1565 1566 uid[0] = '\0'; 1567 switch (e->uidf) { 1568 case UID_NOT_PRESENT: 1569 1570 if (e->uidl != 0) 1571 pr_warn(FW_BUG "Invalid UID length.\n"); 1572 1573 break; 1574 case UID_IS_INTEGER: 1575 1576 sprintf(uid, "%d", e->uid); 1577 1578 break; 1579 case UID_IS_CHARACTER: 1580 1581 memcpy(uid, &e->uid, e->uidl); 1582 uid[e->uidl] = '\0'; 1583 1584 break; 1585 default: 1586 break; 1587 } 1588 1589 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); 1590 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n", 1591 hid, uid, seg_id, 1592 PCI_BUS_NUM(devid), 1593 PCI_SLOT(devid), 1594 PCI_FUNC(devid)); 1595 1596 flags = e->flags; 1597 1598 ret = add_acpi_hid_device(hid, uid, &devid, false); 1599 if (ret) 1600 return ret; 1601 1602 /* 1603 * add_special_device might update the devid in case a 1604 * command-line override is present. So call 1605 * set_dev_entry_from_acpi after add_special_device. 1606 */ 1607 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1608 1609 break; 1610 } 1611 default: 1612 break; 1613 } 1614 1615 p += ivhd_entry_length(p); 1616 } 1617 1618 return 0; 1619 } 1620 1621 /* Allocate PCI segment data structure */ 1622 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, 1623 struct acpi_table_header *ivrs_base) 1624 { 1625 struct amd_iommu_pci_seg *pci_seg; 1626 int last_bdf; 1627 1628 /* 1629 * First parse ACPI tables to find the largest Bus/Dev/Func we need to 1630 * handle in this PCI segment. Upon this information the shared data 1631 * structures for the PCI segments in the system will be allocated. 
1632 */ 1633 last_bdf = find_last_devid_acpi(ivrs_base, id); 1634 if (last_bdf < 0) 1635 return NULL; 1636 1637 pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL); 1638 if (pci_seg == NULL) 1639 return NULL; 1640 1641 pci_seg->last_bdf = last_bdf; 1642 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf); 1643 pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); 1644 pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); 1645 pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); 1646 1647 pci_seg->id = id; 1648 init_llist_head(&pci_seg->dev_data_list); 1649 INIT_LIST_HEAD(&pci_seg->unity_map); 1650 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); 1651 1652 if (alloc_dev_table(pci_seg)) 1653 return NULL; 1654 if (alloc_alias_table(pci_seg)) 1655 return NULL; 1656 if (alloc_rlookup_table(pci_seg)) 1657 return NULL; 1658 1659 return pci_seg; 1660 } 1661 1662 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id, 1663 struct acpi_table_header *ivrs_base) 1664 { 1665 struct amd_iommu_pci_seg *pci_seg; 1666 1667 for_each_pci_segment(pci_seg) { 1668 if (pci_seg->id == id) 1669 return pci_seg; 1670 } 1671 1672 return alloc_pci_segment(id, ivrs_base); 1673 } 1674 1675 static void __init free_pci_segments(void) 1676 { 1677 struct amd_iommu_pci_seg *pci_seg, *next; 1678 1679 for_each_pci_segment_safe(pci_seg, next) { 1680 list_del(&pci_seg->list); 1681 free_irq_lookup_table(pci_seg); 1682 free_rlookup_table(pci_seg); 1683 free_alias_table(pci_seg); 1684 free_dev_table(pci_seg); 1685 kfree(pci_seg); 1686 } 1687 } 1688 1689 static void __init free_iommu_one(struct amd_iommu *iommu) 1690 { 1691 free_cwwb_sem(iommu); 1692 free_command_buffer(iommu); 1693 free_event_buffer(iommu); 1694 free_ppr_log(iommu); 1695 free_ga_log(iommu); 1696 iommu_unmap_mmio_space(iommu); 1697 } 1698 1699 static void __init free_iommu_all(void) 1700 { 1701 struct amd_iommu *iommu, *next; 1702 1703 for_each_iommu_safe(iommu, next) { 1704 list_del(&iommu->list); 1705 free_iommu_one(iommu); 1706 kfree(iommu); 1707 } 1708 } 1709 1710 /* 1711 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) 1712 * Workaround: 1713 * BIOS should disable L2B micellaneous clock gating by setting 1714 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 1715 */ 1716 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 1717 { 1718 u32 value; 1719 1720 if ((boot_cpu_data.x86 != 0x15) || 1721 (boot_cpu_data.x86_model < 0x10) || 1722 (boot_cpu_data.x86_model > 0x1f)) 1723 return; 1724 1725 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1726 pci_read_config_dword(iommu->dev, 0xf4, &value); 1727 1728 if (value & BIT(2)) 1729 return; 1730 1731 /* Select NB indirect register 0x90 and enable writing */ 1732 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); 1733 1734 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); 1735 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); 1736 1737 /* Clear the enable writing bit */ 1738 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1739 } 1740 1741 /* 1742 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) 1743 * Workaround: 1744 * BIOS should enable ATS write permission check by setting 1745 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b 1746 */ 1747 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) 1748 { 1749 u32 value; 1750 1751 if ((boot_cpu_data.x86 != 0x15) || 1752 (boot_cpu_data.x86_model < 0x30) || 
1753 (boot_cpu_data.x86_model > 0x3f)) 1754 return; 1755 1756 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ 1757 value = iommu_read_l2(iommu, 0x47); 1758 1759 if (value & BIT(0)) 1760 return; 1761 1762 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */ 1763 iommu_write_l2(iommu, 0x47, value | BIT(0)); 1764 1765 pci_info(iommu->dev, "Applying ATS write check workaround\n"); 1766 } 1767 1768 /* 1769 * This function glues the initialization function for one IOMMU 1770 * together and also allocates the command buffer and programs the 1771 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1772 */ 1773 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, 1774 struct acpi_table_header *ivrs_base) 1775 { 1776 struct amd_iommu_pci_seg *pci_seg; 1777 1778 pci_seg = get_pci_segment(h->pci_seg, ivrs_base); 1779 if (pci_seg == NULL) 1780 return -ENOMEM; 1781 iommu->pci_seg = pci_seg; 1782 1783 raw_spin_lock_init(&iommu->lock); 1784 atomic64_set(&iommu->cmd_sem_val, 0); 1785 1786 /* Add IOMMU to internal data structures */ 1787 list_add_tail(&iommu->list, &amd_iommu_list); 1788 iommu->index = amd_iommus_present++; 1789 1790 if (unlikely(iommu->index >= MAX_IOMMUS)) { 1791 WARN(1, "System has more IOMMUs than supported by this driver\n"); 1792 return -ENOSYS; 1793 } 1794 1795 /* Index is fine - add IOMMU to the array */ 1796 amd_iommus[iommu->index] = iommu; 1797 1798 /* 1799 * Copy data from ACPI table entry to the iommu struct 1800 */ 1801 iommu->devid = h->devid; 1802 iommu->cap_ptr = h->cap_ptr; 1803 iommu->mmio_phys = h->mmio_phys; 1804 1805 switch (h->type) { 1806 case 0x10: 1807 /* Check if IVHD EFR contains proper max banks/counters */ 1808 if ((h->efr_attr != 0) && 1809 ((h->efr_attr & (0xF << 13)) != 0) && 1810 ((h->efr_attr & (0x3F << 17)) != 0)) 1811 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1812 else 1813 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1814 1815 /* 1816 * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. 1817 * GAM also requires GA mode. Therefore, we need to 1818 * check cmpxchg16b support before enabling it. 1819 */ 1820 if (!boot_cpu_has(X86_FEATURE_CX16) || 1821 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) 1822 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 1823 break; 1824 case 0x11: 1825 case 0x40: 1826 if (h->efr_reg & (1 << 9)) 1827 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1828 else 1829 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1830 1831 /* 1832 * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. 1833 * XT, GAM also requires GA mode. Therefore, we need to 1834 * check cmpxchg16b support before enabling them. 
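* Without cmpxchg16b the 128-bit IRTEs cannot be updated atomically, hence the fallback to legacy interrupt remapping below.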
1835 */ 1836 if (!boot_cpu_has(X86_FEATURE_CX16) || 1837 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { 1838 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 1839 break; 1840 } 1841 1842 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) 1843 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; 1844 1845 early_iommu_features_init(iommu, h); 1846 1847 break; 1848 default: 1849 return -EINVAL; 1850 } 1851 1852 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, 1853 iommu->mmio_phys_end); 1854 if (!iommu->mmio_base) 1855 return -ENOMEM; 1856 1857 return init_iommu_from_acpi(iommu, h); 1858 } 1859 1860 static int __init init_iommu_one_late(struct amd_iommu *iommu) 1861 { 1862 int ret; 1863 1864 if (alloc_cwwb_sem(iommu)) 1865 return -ENOMEM; 1866 1867 if (alloc_command_buffer(iommu)) 1868 return -ENOMEM; 1869 1870 if (alloc_event_buffer(iommu)) 1871 return -ENOMEM; 1872 1873 iommu->int_enabled = false; 1874 1875 init_translation_status(iommu); 1876 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { 1877 iommu_disable(iommu); 1878 clear_translation_pre_enabled(iommu); 1879 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", 1880 iommu->index); 1881 } 1882 if (amd_iommu_pre_enabled) 1883 amd_iommu_pre_enabled = translation_pre_enabled(iommu); 1884 1885 if (amd_iommu_irq_remap) { 1886 ret = amd_iommu_create_irq_domain(iommu); 1887 if (ret) 1888 return ret; 1889 } 1890 1891 /* 1892 * Make sure IOMMU is not considered to translate itself. The IVRS 1893 * table tells us so, but this is a lie! 1894 */ 1895 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; 1896 1897 return 0; 1898 } 1899 1900 /** 1901 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type 1902 * @ivrs: Pointer to the IVRS header 1903 * 1904 * This function search through all IVDB of the maximum supported IVHD 1905 */ 1906 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) 1907 { 1908 u8 *base = (u8 *)ivrs; 1909 struct ivhd_header *ivhd = (struct ivhd_header *) 1910 (base + IVRS_HEADER_LENGTH); 1911 u8 last_type = ivhd->type; 1912 u16 devid = ivhd->devid; 1913 1914 while (((u8 *)ivhd - base < ivrs->length) && 1915 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { 1916 u8 *p = (u8 *) ivhd; 1917 1918 if (ivhd->devid == devid) 1919 last_type = ivhd->type; 1920 ivhd = (struct ivhd_header *)(p + ivhd->length); 1921 } 1922 1923 return last_type; 1924 } 1925 1926 /* 1927 * Iterates over all IOMMU entries in the ACPI table, allocates the 1928 * IOMMU structure and initializes it with init_iommu_one() 1929 */ 1930 static int __init init_iommu_all(struct acpi_table_header *table) 1931 { 1932 u8 *p = (u8 *)table, *end = (u8 *)table; 1933 struct ivhd_header *h; 1934 struct amd_iommu *iommu; 1935 int ret; 1936 1937 end += table->length; 1938 p += IVRS_HEADER_LENGTH; 1939 1940 /* Phase 1: Process all IVHD blocks */ 1941 while (p < end) { 1942 h = (struct ivhd_header *)p; 1943 if (*p == amd_iommu_target_ivhd_type) { 1944 1945 DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x " 1946 "flags: %01x info %04x\n", 1947 h->pci_seg, PCI_BUS_NUM(h->devid), 1948 PCI_SLOT(h->devid), PCI_FUNC(h->devid), 1949 h->cap_ptr, h->flags, h->info); 1950 DUMP_printk(" mmio-addr: %016llx\n", 1951 h->mmio_phys); 1952 1953 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); 1954 if (iommu == NULL) 1955 return -ENOMEM; 1956 1957 ret = init_iommu_one(iommu, h, table); 1958 if (ret) 1959 return ret; 1960 } 1961 p += h->length; 1962 1963 } 1964 WARN_ON(p != end); 1965 1966 /* Phase 2 : Early feature 
support check */ 1967 get_global_efr(); 1968 1969 /* Phase 3 : Enabling IOMMU features */ 1970 for_each_iommu(iommu) { 1971 ret = init_iommu_one_late(iommu); 1972 if (ret) 1973 return ret; 1974 } 1975 1976 return 0; 1977 } 1978 1979 static void init_iommu_perf_ctr(struct amd_iommu *iommu) 1980 { 1981 u64 val; 1982 struct pci_dev *pdev = iommu->dev; 1983 1984 if (!iommu_feature(iommu, FEATURE_PC)) 1985 return; 1986 1987 amd_iommu_pc_present = true; 1988 1989 pci_info(pdev, "IOMMU performance counters supported\n"); 1990 1991 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); 1992 iommu->max_banks = (u8) ((val >> 12) & 0x3f); 1993 iommu->max_counters = (u8) ((val >> 7) & 0xf); 1994 1995 return; 1996 } 1997 1998 static ssize_t amd_iommu_show_cap(struct device *dev, 1999 struct device_attribute *attr, 2000 char *buf) 2001 { 2002 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 2003 return sysfs_emit(buf, "%x\n", iommu->cap); 2004 } 2005 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); 2006 2007 static ssize_t amd_iommu_show_features(struct device *dev, 2008 struct device_attribute *attr, 2009 char *buf) 2010 { 2011 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 2012 return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features); 2013 } 2014 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); 2015 2016 static struct attribute *amd_iommu_attrs[] = { 2017 &dev_attr_cap.attr, 2018 &dev_attr_features.attr, 2019 NULL, 2020 }; 2021 2022 static struct attribute_group amd_iommu_group = { 2023 .name = "amd-iommu", 2024 .attrs = amd_iommu_attrs, 2025 }; 2026 2027 static const struct attribute_group *amd_iommu_groups[] = { 2028 &amd_iommu_group, 2029 NULL, 2030 }; 2031 2032 /* 2033 * Note: IVHD 0x11 and 0x40 also contains exact copy 2034 * of the IOMMU Extended Feature Register [MMIO Offset 0030h]. 2035 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init). 2036 */ 2037 static void __init late_iommu_features_init(struct amd_iommu *iommu) 2038 { 2039 u64 features, features2; 2040 2041 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) 2042 return; 2043 2044 /* read extended feature bits */ 2045 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); 2046 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); 2047 2048 if (!iommu->features) { 2049 iommu->features = features; 2050 iommu->features2 = features2; 2051 return; 2052 } 2053 2054 /* 2055 * Sanity check and warn if EFR values from 2056 * IVHD and MMIO conflict. 2057 */ 2058 if (features != iommu->features || 2059 features2 != iommu->features2) { 2060 pr_warn(FW_WARN 2061 "EFR mismatch. 
Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n", 2062 features, iommu->features, 2063 features2, iommu->features2); 2064 } 2065 } 2066 2067 static int __init iommu_init_pci(struct amd_iommu *iommu) 2068 { 2069 int cap_ptr = iommu->cap_ptr; 2070 int ret; 2071 2072 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2073 PCI_BUS_NUM(iommu->devid), 2074 iommu->devid & 0xff); 2075 if (!iommu->dev) 2076 return -ENODEV; 2077 2078 /* Prevent binding other PCI device drivers to IOMMU devices */ 2079 iommu->dev->match_driver = false; 2080 2081 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 2082 &iommu->cap); 2083 2084 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) 2085 amd_iommu_iotlb_sup = false; 2086 2087 late_iommu_features_init(iommu); 2088 2089 if (iommu_feature(iommu, FEATURE_GT)) { 2090 int glxval; 2091 u32 max_pasid; 2092 u64 pasmax; 2093 2094 pasmax = iommu->features & FEATURE_PASID_MASK; 2095 pasmax >>= FEATURE_PASID_SHIFT; 2096 max_pasid = (1 << (pasmax + 1)) - 1; 2097 2098 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); 2099 2100 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); 2101 2102 glxval = iommu->features & FEATURE_GLXVAL_MASK; 2103 glxval >>= FEATURE_GLXVAL_SHIFT; 2104 2105 if (amd_iommu_max_glx_val == -1) 2106 amd_iommu_max_glx_val = glxval; 2107 else 2108 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); 2109 } 2110 2111 if (iommu_feature(iommu, FEATURE_GT) && 2112 iommu_feature(iommu, FEATURE_PPR)) { 2113 iommu->is_iommu_v2 = true; 2114 amd_iommu_v2_present = true; 2115 } 2116 2117 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) 2118 return -ENOMEM; 2119 2120 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { 2121 pr_info("Using strict mode due to virtualization\n"); 2122 iommu_set_dma_strict(); 2123 amd_iommu_np_cache = true; 2124 } 2125 2126 init_iommu_perf_ctr(iommu); 2127 2128 if (amd_iommu_pgtable == AMD_IOMMU_V2) { 2129 if (!iommu_feature(iommu, FEATURE_GIOSUP) || 2130 !iommu_feature(iommu, FEATURE_GT)) { 2131 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); 2132 amd_iommu_pgtable = AMD_IOMMU_V1; 2133 } else if (iommu_default_passthrough()) { 2134 pr_warn("V2 page table doesn't support passthrough mode. 
Fallback to v1.\n"); 2135 amd_iommu_pgtable = AMD_IOMMU_V1; 2136 } 2137 } 2138 2139 if (is_rd890_iommu(iommu->dev)) { 2140 int i, j; 2141 2142 iommu->root_pdev = 2143 pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2144 iommu->dev->bus->number, 2145 PCI_DEVFN(0, 0)); 2146 2147 /* 2148 * Some rd890 systems may not be fully reconfigured by the 2149 * BIOS, so it's necessary for us to store this information so 2150 * it can be reprogrammed on resume 2151 */ 2152 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, 2153 &iommu->stored_addr_lo); 2154 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, 2155 &iommu->stored_addr_hi); 2156 2157 /* Low bit locks writes to configuration space */ 2158 iommu->stored_addr_lo &= ~1; 2159 2160 for (i = 0; i < 6; i++) 2161 for (j = 0; j < 0x12; j++) 2162 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); 2163 2164 for (i = 0; i < 0x83; i++) 2165 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 2166 } 2167 2168 amd_iommu_erratum_746_workaround(iommu); 2169 amd_iommu_ats_write_check_workaround(iommu); 2170 2171 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, 2172 amd_iommu_groups, "ivhd%d", iommu->index); 2173 if (ret) 2174 return ret; 2175 2176 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); 2177 2178 return pci_enable_device(iommu->dev); 2179 } 2180 2181 static void print_iommu_info(void) 2182 { 2183 static const char * const feat_str[] = { 2184 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", 2185 "IA", "GA", "HE", "PC" 2186 }; 2187 struct amd_iommu *iommu; 2188 2189 for_each_iommu(iommu) { 2190 struct pci_dev *pdev = iommu->dev; 2191 int i; 2192 2193 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr); 2194 2195 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { 2196 pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2); 2197 2198 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { 2199 if (iommu_feature(iommu, (1ULL << i))) 2200 pr_cont(" %s", feat_str[i]); 2201 } 2202 2203 if (iommu->features & FEATURE_GAM_VAPIC) 2204 pr_cont(" GA_vAPIC"); 2205 2206 if (iommu->features & FEATURE_SNP) 2207 pr_cont(" SNP"); 2208 2209 pr_cont("\n"); 2210 } 2211 } 2212 if (irq_remapping_enabled) { 2213 pr_info("Interrupt remapping enabled\n"); 2214 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2215 pr_info("X2APIC enabled\n"); 2216 } 2217 if (amd_iommu_pgtable == AMD_IOMMU_V2) { 2218 pr_info("V2 page table enabled (Paging mode : %d level)\n", 2219 amd_iommu_gpt_level); 2220 } 2221 } 2222 2223 static int __init amd_iommu_init_pci(void) 2224 { 2225 struct amd_iommu *iommu; 2226 struct amd_iommu_pci_seg *pci_seg; 2227 int ret; 2228 2229 for_each_iommu(iommu) { 2230 ret = iommu_init_pci(iommu); 2231 if (ret) { 2232 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", 2233 iommu->index, ret); 2234 goto out; 2235 } 2236 /* Need to setup range after PCI init */ 2237 iommu_set_cwwb_range(iommu); 2238 } 2239 2240 /* 2241 * Order is important here to make sure any unity map requirements are 2242 * fulfilled. The unity mappings are created and written to the device 2243 * table during the iommu_init_pci() call. 2244 * 2245 * After that we call init_device_table_dma() to make sure any 2246 * uninitialized DTE will block DMA, and in the end we flush the caches 2247 * of all IOMMUs to make sure the changes to the device table are 2248 * active. 
2249 */ 2250 for_each_pci_segment(pci_seg) 2251 init_device_table_dma(pci_seg); 2252 2253 for_each_iommu(iommu) 2254 iommu_flush_all_caches(iommu); 2255 2256 print_iommu_info(); 2257 2258 out: 2259 return ret; 2260 } 2261 2262 /**************************************************************************** 2263 * 2264 * The following functions initialize the MSI interrupts for all IOMMUs 2265 * in the system. It's a bit challenging because there could be multiple 2266 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 2267 * pci_dev. 2268 * 2269 ****************************************************************************/ 2270 2271 static int iommu_setup_msi(struct amd_iommu *iommu) 2272 { 2273 int r; 2274 2275 r = pci_enable_msi(iommu->dev); 2276 if (r) 2277 return r; 2278 2279 r = request_threaded_irq(iommu->dev->irq, 2280 amd_iommu_int_handler, 2281 amd_iommu_int_thread, 2282 0, "AMD-Vi", 2283 iommu); 2284 2285 if (r) { 2286 pci_disable_msi(iommu->dev); 2287 return r; 2288 } 2289 2290 return 0; 2291 } 2292 2293 union intcapxt { 2294 u64 capxt; 2295 struct { 2296 u64 reserved_0 : 2, 2297 dest_mode_logical : 1, 2298 reserved_1 : 5, 2299 destid_0_23 : 24, 2300 vector : 8, 2301 reserved_2 : 16, 2302 destid_24_31 : 8; 2303 }; 2304 } __attribute__ ((packed)); 2305 2306 2307 static struct irq_chip intcapxt_controller; 2308 2309 static int intcapxt_irqdomain_activate(struct irq_domain *domain, 2310 struct irq_data *irqd, bool reserve) 2311 { 2312 return 0; 2313 } 2314 2315 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain, 2316 struct irq_data *irqd) 2317 { 2318 } 2319 2320 2321 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, 2322 unsigned int nr_irqs, void *arg) 2323 { 2324 struct irq_alloc_info *info = arg; 2325 int i, ret; 2326 2327 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) 2328 return -EINVAL; 2329 2330 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 2331 if (ret < 0) 2332 return ret; 2333 2334 for (i = virq; i < virq + nr_irqs; i++) { 2335 struct irq_data *irqd = irq_domain_get_irq_data(domain, i); 2336 2337 irqd->chip = &intcapxt_controller; 2338 irqd->hwirq = info->hwirq; 2339 irqd->chip_data = info->data; 2340 __irq_set_handler(i, handle_edge_irq, 0, "edge"); 2341 } 2342 2343 return ret; 2344 } 2345 2346 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq, 2347 unsigned int nr_irqs) 2348 { 2349 irq_domain_free_irqs_top(domain, virq, nr_irqs); 2350 } 2351 2352 2353 static void intcapxt_unmask_irq(struct irq_data *irqd) 2354 { 2355 struct amd_iommu *iommu = irqd->chip_data; 2356 struct irq_cfg *cfg = irqd_cfg(irqd); 2357 union intcapxt xt; 2358 2359 xt.capxt = 0ULL; 2360 xt.dest_mode_logical = apic->dest_mode_logical; 2361 xt.vector = cfg->vector; 2362 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); 2363 xt.destid_24_31 = cfg->dest_apicid >> 24; 2364 2365 writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); 2366 } 2367 2368 static void intcapxt_mask_irq(struct irq_data *irqd) 2369 { 2370 struct amd_iommu *iommu = irqd->chip_data; 2371 2372 writeq(0, iommu->mmio_base + irqd->hwirq); 2373 } 2374 2375 2376 static int intcapxt_set_affinity(struct irq_data *irqd, 2377 const struct cpumask *mask, bool force) 2378 { 2379 struct irq_data *parent = irqd->parent_data; 2380 int ret; 2381 2382 ret = parent->chip->irq_set_affinity(parent, mask, force); 2383 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) 2384 return ret; 2385 return 0; 2386 } 2387 2388 static int 
intcapxt_set_wake(struct irq_data *irqd, unsigned int on) 2389 { 2390 return on ? -EOPNOTSUPP : 0; 2391 } 2392 2393 static struct irq_chip intcapxt_controller = { 2394 .name = "IOMMU-MSI", 2395 .irq_unmask = intcapxt_unmask_irq, 2396 .irq_mask = intcapxt_mask_irq, 2397 .irq_ack = irq_chip_ack_parent, 2398 .irq_retrigger = irq_chip_retrigger_hierarchy, 2399 .irq_set_affinity = intcapxt_set_affinity, 2400 .irq_set_wake = intcapxt_set_wake, 2401 .flags = IRQCHIP_MASK_ON_SUSPEND, 2402 }; 2403 2404 static const struct irq_domain_ops intcapxt_domain_ops = { 2405 .alloc = intcapxt_irqdomain_alloc, 2406 .free = intcapxt_irqdomain_free, 2407 .activate = intcapxt_irqdomain_activate, 2408 .deactivate = intcapxt_irqdomain_deactivate, 2409 }; 2410 2411 2412 static struct irq_domain *iommu_irqdomain; 2413 2414 static struct irq_domain *iommu_get_irqdomain(void) 2415 { 2416 struct fwnode_handle *fn; 2417 2418 /* No need for locking here (yet) as the init is single-threaded */ 2419 if (iommu_irqdomain) 2420 return iommu_irqdomain; 2421 2422 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); 2423 if (!fn) 2424 return NULL; 2425 2426 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, 2427 fn, &intcapxt_domain_ops, 2428 NULL); 2429 if (!iommu_irqdomain) 2430 irq_domain_free_fwnode(fn); 2431 2432 return iommu_irqdomain; 2433 } 2434 2435 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, 2436 int hwirq, irq_handler_t thread_fn) 2437 { 2438 struct irq_domain *domain; 2439 struct irq_alloc_info info; 2440 int irq, ret; 2441 int node = dev_to_node(&iommu->dev->dev); 2442 2443 domain = iommu_get_irqdomain(); 2444 if (!domain) 2445 return -ENXIO; 2446 2447 init_irq_alloc_info(&info, NULL); 2448 info.type = X86_IRQ_ALLOC_TYPE_AMDVI; 2449 info.data = iommu; 2450 info.hwirq = hwirq; 2451 2452 irq = irq_domain_alloc_irqs(domain, 1, node, &info); 2453 if (irq < 0) { 2454 irq_domain_remove(domain); 2455 return irq; 2456 } 2457 2458 ret = request_threaded_irq(irq, amd_iommu_int_handler, 2459 thread_fn, 0, devname, iommu); 2460 if (ret) { 2461 irq_domain_free_irqs(irq, 1); 2462 irq_domain_remove(domain); 2463 return ret; 2464 } 2465 2466 return 0; 2467 } 2468 2469 static int iommu_setup_intcapxt(struct amd_iommu *iommu) 2470 { 2471 int ret; 2472 2473 snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), 2474 "AMD-Vi%d-Evt", iommu->index); 2475 ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, 2476 MMIO_INTCAPXT_EVT_OFFSET, 2477 amd_iommu_int_thread_evtlog); 2478 if (ret) 2479 return ret; 2480 2481 snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), 2482 "AMD-Vi%d-PPR", iommu->index); 2483 ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, 2484 MMIO_INTCAPXT_PPR_OFFSET, 2485 amd_iommu_int_thread_pprlog); 2486 if (ret) 2487 return ret; 2488 2489 #ifdef CONFIG_IRQ_REMAP 2490 snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), 2491 "AMD-Vi%d-GA", iommu->index); 2492 ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, 2493 MMIO_INTCAPXT_GALOG_OFFSET, 2494 amd_iommu_int_thread_galog); 2495 #endif 2496 2497 return ret; 2498 } 2499 2500 static int iommu_init_irq(struct amd_iommu *iommu) 2501 { 2502 int ret; 2503 2504 if (iommu->int_enabled) 2505 goto enable_faults; 2506 2507 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2508 ret = iommu_setup_intcapxt(iommu); 2509 else if (iommu->dev->msi_cap) 2510 ret = iommu_setup_msi(iommu); 2511 else 2512 ret = -ENODEV; 2513 2514 if (ret) 2515 return ret; 2516 2517 iommu->int_enabled = true; 2518 enable_faults: 
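	/*
	 * IRQ allocation is already done at this point (either above or on a
	 * previous call); from here on only (re-)enable interrupt generation
	 * in the hardware: the INTCAPXT control in x2APIC mode, the event log
	 * interrupt and, when a PPR log is present, the PPR log interrupt.
	 */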
2519
2520 	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2521 		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2522
2523 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2524
2525 	if (iommu->ppr_log != NULL)
2526 		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2527 	return 0;
2528 }
2529
2530 /****************************************************************************
2531  *
2532  * The next functions belong to the last pass of parsing the ACPI
2533  * table. In this pass the memory mapping requirements are gathered
2534  * (like exclusion and unity mapping ranges).
2535  *
2536  ****************************************************************************/
2537
2538 static void __init free_unity_maps(void)
2539 {
2540 	struct unity_map_entry *entry, *next;
2541 	struct amd_iommu_pci_seg *p, *pci_seg;
2542
2543 	for_each_pci_segment_safe(pci_seg, p) {
2544 		list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2545 			list_del(&entry->list);
2546 			kfree(entry);
2547 		}
2548 	}
2549 }
2550
2551 /* called for unity map ACPI definition */
2552 static int __init init_unity_map_range(struct ivmd_header *m,
2553 				       struct acpi_table_header *ivrs_base)
2554 {
2555 	struct unity_map_entry *e = NULL;
2556 	struct amd_iommu_pci_seg *pci_seg;
2557 	char *s;
2558
2559 	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2560 	if (pci_seg == NULL)
2561 		return -ENOMEM;
2562
2563 	e = kzalloc(sizeof(*e), GFP_KERNEL);
2564 	if (e == NULL)
2565 		return -ENOMEM;
2566
2567 	switch (m->type) {
2568 	default:
2569 		kfree(e);
2570 		return 0;
2571 	case ACPI_IVMD_TYPE:
2572 		s = "IVMD_TYPE\t\t\t";
2573 		e->devid_start = e->devid_end = m->devid;
2574 		break;
2575 	case ACPI_IVMD_TYPE_ALL:
2576 		s = "IVMD_TYPE_ALL\t\t";
2577 		e->devid_start = 0;
2578 		e->devid_end = pci_seg->last_bdf;
2579 		break;
2580 	case ACPI_IVMD_TYPE_RANGE:
2581 		s = "IVMD_TYPE_RANGE\t\t";
2582 		e->devid_start = m->devid;
2583 		e->devid_end = m->aux;
2584 		break;
2585 	}
2586 	e->address_start = PAGE_ALIGN(m->range_start);
2587 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2588 	e->prot = m->flags >> 1;
2589
2590 	/*
2591 	 * Treat per-device exclusion ranges as r/w unity-mapped regions,
2592 	 * because some buggy BIOSes overwrite the exclusion range
2593 	 * (the exclusion_start and exclusion_length members). This
2594 	 * happens when there are multiple exclusion ranges (IVMD entries)
2595 	 * defined in the ACPI table.
2596 */ 2597 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2598 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; 2599 2600 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: " 2601 "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx" 2602 " flags: %x\n", s, m->pci_seg, 2603 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 2604 PCI_FUNC(e->devid_start), m->pci_seg, 2605 PCI_BUS_NUM(e->devid_end), 2606 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 2607 e->address_start, e->address_end, m->flags); 2608 2609 list_add_tail(&e->list, &pci_seg->unity_map); 2610 2611 return 0; 2612 } 2613 2614 /* iterates over all memory definitions we find in the ACPI table */ 2615 static int __init init_memory_definitions(struct acpi_table_header *table) 2616 { 2617 u8 *p = (u8 *)table, *end = (u8 *)table; 2618 struct ivmd_header *m; 2619 2620 end += table->length; 2621 p += IVRS_HEADER_LENGTH; 2622 2623 while (p < end) { 2624 m = (struct ivmd_header *)p; 2625 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) 2626 init_unity_map_range(m, table); 2627 2628 p += m->length; 2629 } 2630 2631 return 0; 2632 } 2633 2634 /* 2635 * Init the device table to not allow DMA access for devices 2636 */ 2637 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2638 { 2639 u32 devid; 2640 struct dev_table_entry *dev_table = pci_seg->dev_table; 2641 2642 if (dev_table == NULL) 2643 return; 2644 2645 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2646 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID); 2647 if (!amd_iommu_snp_en) 2648 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION); 2649 } 2650 } 2651 2652 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2653 { 2654 u32 devid; 2655 struct dev_table_entry *dev_table = pci_seg->dev_table; 2656 2657 if (dev_table == NULL) 2658 return; 2659 2660 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2661 dev_table[devid].data[0] = 0ULL; 2662 dev_table[devid].data[1] = 0ULL; 2663 } 2664 } 2665 2666 static void init_device_table(void) 2667 { 2668 struct amd_iommu_pci_seg *pci_seg; 2669 u32 devid; 2670 2671 if (!amd_iommu_irq_remap) 2672 return; 2673 2674 for_each_pci_segment(pci_seg) { 2675 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) 2676 __set_dev_entry_bit(pci_seg->dev_table, 2677 devid, DEV_ENTRY_IRQ_TBL_EN); 2678 } 2679 } 2680 2681 static void iommu_init_flags(struct amd_iommu *iommu) 2682 { 2683 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 2684 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 2685 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 2686 2687 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 2688 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 2689 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 2690 2691 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 2692 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 2693 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 2694 2695 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
2696 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2697 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2698
2699 	/*
2700 	 * Make IOMMU memory accesses cache coherent.
2701 	 */
2702 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2703
2704 	/* Set IOTLB invalidation timeout to 1s */
2705 	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2706 }
2707
2708 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2709 {
2710 	int i, j;
2711 	u32 ioc_feature_control;
2712 	struct pci_dev *pdev = iommu->root_pdev;
2713
2714 	/* RD890 BIOSes may not have completely reconfigured the iommu */
2715 	if (!is_rd890_iommu(iommu->dev) || !pdev)
2716 		return;
2717
2718 	/*
2719 	 * First, we need to ensure that the iommu is enabled. This is
2720 	 * controlled by a register in the northbridge.
2721 	 */
2722
2723 	/* Select Northbridge indirect register 0x75 and enable writing */
2724 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2725 	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2726
2727 	/* Enable the iommu */
2728 	if (!(ioc_feature_control & 0x1))
2729 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2730
2731 	/* Restore the iommu BAR */
2732 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2733 			       iommu->stored_addr_lo);
2734 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2735 			       iommu->stored_addr_hi);
2736
2737 	/* Restore the l1 indirect regs for each of the 6 l1s */
2738 	for (i = 0; i < 6; i++)
2739 		for (j = 0; j < 0x12; j++)
2740 			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2741
2742 	/* Restore the l2 indirect regs */
2743 	for (i = 0; i < 0x83; i++)
2744 		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2745
2746 	/* Lock PCI setup registers */
2747 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2748 			       iommu->stored_addr_lo | 1);
2749 }
2750
2751 static void iommu_enable_ga(struct amd_iommu *iommu)
2752 {
2753 #ifdef CONFIG_IRQ_REMAP
2754 	switch (amd_iommu_guest_ir) {
2755 	case AMD_IOMMU_GUEST_IR_VAPIC:
2756 	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2757 		iommu_feature_enable(iommu, CONTROL_GA_EN);
2758 		iommu->irte_ops = &irte_128_ops;
2759 		break;
2760 	default:
2761 		iommu->irte_ops = &irte_32_ops;
2762 		break;
2763 	}
2764 #endif
2765 }
2766
2767 static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2768 {
2769 	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2770 }
2771
2772 static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2773 {
2774 	u64 ctrl;
2775
2776 	if (!amd_iommu_irtcachedis)
2777 		return;
2778
2779 	/*
2780 	 * Note:
2781 	 * Support for the IRTCacheDis feature is determined by
2782 	 * checking whether the bit is writable.
2783 	 */
2784 	iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2785 	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2786 	ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
2787 	if (ctrl)
2788 		iommu->irtcachedis_enabled = true;
2789 	pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2790 		iommu->index, iommu->devid,
2791 		iommu->irtcachedis_enabled ? "disabled" : "enabled");
2792 }
2793
2794 static void early_enable_iommu(struct amd_iommu *iommu)
2795 {
2796 	iommu_disable(iommu);
2797 	iommu_init_flags(iommu);
2798 	iommu_set_device_table(iommu);
2799 	iommu_enable_command_buffer(iommu);
2800 	iommu_enable_event_buffer(iommu);
2801 	iommu_set_exclusion_range(iommu);
2802 	iommu_enable_ga(iommu);
2803 	iommu_enable_xt(iommu);
2804 	iommu_enable_irtcachedis(iommu);
2805 	iommu_enable(iommu);
2806 	iommu_flush_all_caches(iommu);
2807 }
2808
2809 /*
2810  * This function finally enables all IOMMUs found in the system after
2811  * they have been initialized.
2812  *
2813  * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
2814  * copy the old content of the device table entries. If that is not the
2815  * case, or if the copy fails, just continue as a normal kernel would.
2816  */
2817 static void early_enable_iommus(void)
2818 {
2819 	struct amd_iommu *iommu;
2820 	struct amd_iommu_pci_seg *pci_seg;
2821
2822 	if (!copy_device_table()) {
2823 		/*
2824 		 * If we get here because copying the device table from the old
2825 		 * kernel (with all IOMMUs enabled) failed, print an error
2826 		 * message and free the allocated old_dev_tbl_cpy.
2827 		 */
2828 		if (amd_iommu_pre_enabled)
2829 			pr_err("Failed to copy DEV table from previous kernel.\n");
2830
2831 		for_each_pci_segment(pci_seg) {
2832 			if (pci_seg->old_dev_tbl_cpy != NULL) {
2833 				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
2834 					   get_order(pci_seg->dev_table_size));
2835 				pci_seg->old_dev_tbl_cpy = NULL;
2836 			}
2837 		}
2838
2839 		for_each_iommu(iommu) {
2840 			clear_translation_pre_enabled(iommu);
2841 			early_enable_iommu(iommu);
2842 		}
2843 	} else {
2844 		pr_info("Copied DEV table from previous kernel.\n");
2845
2846 		for_each_pci_segment(pci_seg) {
2847 			free_pages((unsigned long)pci_seg->dev_table,
2848 				   get_order(pci_seg->dev_table_size));
2849 			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2850 		}
2851
2852 		for_each_iommu(iommu) {
2853 			iommu_disable_command_buffer(iommu);
2854 			iommu_disable_event_buffer(iommu);
2855 			iommu_disable_irtcachedis(iommu);
2856 			iommu_enable_command_buffer(iommu);
2857 			iommu_enable_event_buffer(iommu);
2858 			iommu_enable_ga(iommu);
2859 			iommu_enable_xt(iommu);
2860 			iommu_enable_irtcachedis(iommu);
2861 			iommu_set_device_table(iommu);
2862 			iommu_flush_all_caches(iommu);
2863 		}
2864 	}
2865 }
2866
2867 static void enable_iommus_v2(void)
2868 {
2869 	struct amd_iommu *iommu;
2870
2871 	for_each_iommu(iommu) {
2872 		iommu_enable_ppr_log(iommu);
2873 		iommu_enable_gt(iommu);
2874 	}
2875 }
2876
2877 static void enable_iommus_vapic(void)
2878 {
2879 #ifdef CONFIG_IRQ_REMAP
2880 	u32 status, i;
2881 	struct amd_iommu *iommu;
2882
2883 	for_each_iommu(iommu) {
2884 		/*
2885 		 * Disable GALog if already running. It could have been enabled
2886 		 * in the previous boot before kdump.
2887 		 */
2888 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2889 		if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2890 			continue;
2891
2892 		iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2893 		iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2894
2895 		/*
2896 		 * The GALogRun bit must be polled until it reads back as zero
2897 		 * before the GA Log registers can be modified safely.
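		 * The loop below polls the GALogRun bit in the MMIO status
		 * register, waiting udelay(10) between reads for at most
		 * LOOP_TIMEOUT iterations before giving up with a WARN_ON().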
2898 */ 2899 for (i = 0; i < LOOP_TIMEOUT; ++i) { 2900 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 2901 if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) 2902 break; 2903 udelay(10); 2904 } 2905 2906 if (WARN_ON(i >= LOOP_TIMEOUT)) 2907 return; 2908 } 2909 2910 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && 2911 !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) { 2912 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2913 return; 2914 } 2915 2916 if (amd_iommu_snp_en && 2917 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) { 2918 pr_warn("Force to disable Virtual APIC due to SNP\n"); 2919 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2920 return; 2921 } 2922 2923 /* Enabling GAM and SNPAVIC support */ 2924 for_each_iommu(iommu) { 2925 if (iommu_init_ga_log(iommu) || 2926 iommu_ga_log_enable(iommu)) 2927 return; 2928 2929 iommu_feature_enable(iommu, CONTROL_GAM_EN); 2930 if (amd_iommu_snp_en) 2931 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); 2932 } 2933 2934 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); 2935 pr_info("Virtual APIC enabled\n"); 2936 #endif 2937 } 2938 2939 static void enable_iommus(void) 2940 { 2941 early_enable_iommus(); 2942 enable_iommus_vapic(); 2943 enable_iommus_v2(); 2944 } 2945 2946 static void disable_iommus(void) 2947 { 2948 struct amd_iommu *iommu; 2949 2950 for_each_iommu(iommu) 2951 iommu_disable(iommu); 2952 2953 #ifdef CONFIG_IRQ_REMAP 2954 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 2955 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); 2956 #endif 2957 } 2958 2959 /* 2960 * Suspend/Resume support 2961 * disable suspend until real resume implemented 2962 */ 2963 2964 static void amd_iommu_resume(void) 2965 { 2966 struct amd_iommu *iommu; 2967 2968 for_each_iommu(iommu) 2969 iommu_apply_resume_quirks(iommu); 2970 2971 /* re-load the hardware */ 2972 enable_iommus(); 2973 2974 amd_iommu_enable_interrupts(); 2975 } 2976 2977 static int amd_iommu_suspend(void) 2978 { 2979 /* disable IOMMUs to go out of the way for BIOS */ 2980 disable_iommus(); 2981 2982 return 0; 2983 } 2984 2985 static struct syscore_ops amd_iommu_syscore_ops = { 2986 .suspend = amd_iommu_suspend, 2987 .resume = amd_iommu_resume, 2988 }; 2989 2990 static void __init free_iommu_resources(void) 2991 { 2992 kmem_cache_destroy(amd_iommu_irq_cache); 2993 amd_iommu_irq_cache = NULL; 2994 2995 free_iommu_all(); 2996 free_pci_segments(); 2997 } 2998 2999 /* SB IOAPIC is always on this device in AMD systems */ 3000 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) 3001 3002 static bool __init check_ioapic_information(void) 3003 { 3004 const char *fw_bug = FW_BUG; 3005 bool ret, has_sb_ioapic; 3006 int idx; 3007 3008 has_sb_ioapic = false; 3009 ret = false; 3010 3011 /* 3012 * If we have map overrides on the kernel command line the 3013 * messages in this function might not describe firmware bugs 3014 * anymore - so be careful 3015 */ 3016 if (cmdline_maps) 3017 fw_bug = ""; 3018 3019 for (idx = 0; idx < nr_ioapics; idx++) { 3020 int devid, id = mpc_ioapic_id(idx); 3021 3022 devid = get_ioapic_devid(id); 3023 if (devid < 0) { 3024 pr_err("%s: IOAPIC[%d] not in IVRS table\n", 3025 fw_bug, id); 3026 ret = false; 3027 } else if (devid == IOAPIC_SB_DEVID) { 3028 has_sb_ioapic = true; 3029 ret = true; 3030 } 3031 } 3032 3033 if (!has_sb_ioapic) { 3034 /* 3035 * We expect the SB IOAPIC to be listed in the IVRS 3036 * table. The system timer is connected to the SB IOAPIC 3037 * and if we don't have it in the list the system will 3038 * panic at boot time. 
This situation usually happens
3039 		 * when the BIOS is buggy and provides the wrong
3040 		 * device id for the IOAPIC in the system.
3041 		 */
3042 		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
3043 	}
3044
3045 	if (!ret)
3046 		pr_err("Disabling interrupt remapping\n");
3047
3048 	return ret;
3049 }
3050
3051 static void __init free_dma_resources(void)
3052 {
3053 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
3054 		   get_order(MAX_DOMAIN_ID/8));
3055 	amd_iommu_pd_alloc_bitmap = NULL;
3056
3057 	free_unity_maps();
3058 }
3059
3060 static void __init ivinfo_init(void *ivrs)
3061 {
3062 	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
3063 }
3064
3065 /*
3066  * This is the hardware init function for AMD IOMMU in the system.
3067  * This function is called either from amd_iommu_init or from the interrupt
3068  * remapping setup code.
3069  *
3070  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3071  * four times:
3072  *
3073  * 1st pass) Discover the most comprehensive IVHD type to use.
3074  *
3075  * 2nd pass) Find the highest PCI device id the driver has to handle.
3076  *           Based on this information the sizes of the data structures
3077  *           that need to be allocated are determined.
3078  *
3079  * 3rd pass) Initialize the data structures just allocated with the
3080  *           information in the ACPI table about available AMD IOMMUs
3081  *           in the system. It also maps the PCI devices in the
3082  *           system to specific IOMMUs.
3083  *
3084  * 4th pass) After the basic data structures are allocated and
3085  *           initialized we update them with information about memory
3086  *           remapping requirements parsed out of the ACPI table in
3087  *           this last pass.
3088  *
3089  * After everything is set up the IOMMUs are enabled and the necessary
3090  * hotplug and suspend notifiers are registered.
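 *
 * (Within this file, the 1st pass is implemented by
 * get_highest_supported_ivhd_type(), the 3rd pass by init_iommu_all() and
 * the 4th pass by init_memory_definitions(), all called from
 * early_amd_iommu_init() below.)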
3091 */ 3092 static int __init early_amd_iommu_init(void) 3093 { 3094 struct acpi_table_header *ivrs_base; 3095 int remap_cache_sz, ret; 3096 acpi_status status; 3097 3098 if (!amd_iommu_detected) 3099 return -ENODEV; 3100 3101 status = acpi_get_table("IVRS", 0, &ivrs_base); 3102 if (status == AE_NOT_FOUND) 3103 return -ENODEV; 3104 else if (ACPI_FAILURE(status)) { 3105 const char *err = acpi_format_exception(status); 3106 pr_err("IVRS table error: %s\n", err); 3107 return -EINVAL; 3108 } 3109 3110 /* 3111 * Validate checksum here so we don't need to do it when 3112 * we actually parse the table 3113 */ 3114 ret = check_ivrs_checksum(ivrs_base); 3115 if (ret) 3116 goto out; 3117 3118 ivinfo_init(ivrs_base); 3119 3120 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); 3121 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); 3122 3123 /* Device table - directly used by all IOMMUs */ 3124 ret = -ENOMEM; 3125 3126 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( 3127 GFP_KERNEL | __GFP_ZERO, 3128 get_order(MAX_DOMAIN_ID/8)); 3129 if (amd_iommu_pd_alloc_bitmap == NULL) 3130 goto out; 3131 3132 /* 3133 * never allocate domain 0 because its used as the non-allocated and 3134 * error value placeholder 3135 */ 3136 __set_bit(0, amd_iommu_pd_alloc_bitmap); 3137 3138 /* 3139 * now the data structures are allocated and basically initialized 3140 * start the real acpi table scan 3141 */ 3142 ret = init_iommu_all(ivrs_base); 3143 if (ret) 3144 goto out; 3145 3146 /* 5 level guest page table */ 3147 if (cpu_feature_enabled(X86_FEATURE_LA57) && 3148 check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL) 3149 amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; 3150 3151 /* Disable any previously enabled IOMMUs */ 3152 if (!is_kdump_kernel() || amd_iommu_disabled) 3153 disable_iommus(); 3154 3155 if (amd_iommu_irq_remap) 3156 amd_iommu_irq_remap = check_ioapic_information(); 3157 3158 if (amd_iommu_irq_remap) { 3159 struct amd_iommu_pci_seg *pci_seg; 3160 /* 3161 * Interrupt remapping enabled, create kmem_cache for the 3162 * remapping tables. 
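		 * The per-entry size depends on the guest interrupt mode:
		 * plain 32-bit IRTEs in legacy mode, 128-bit IRTEs (two u64
		 * words) when GA/vAPIC support is in use (see
		 * iommu_enable_ga() and the irte_*_ops above).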
3163 */ 3164 ret = -ENOMEM; 3165 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3166 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); 3167 else 3168 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); 3169 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", 3170 remap_cache_sz, 3171 DTE_INTTAB_ALIGNMENT, 3172 0, NULL); 3173 if (!amd_iommu_irq_cache) 3174 goto out; 3175 3176 for_each_pci_segment(pci_seg) { 3177 if (alloc_irq_lookup_table(pci_seg)) 3178 goto out; 3179 } 3180 } 3181 3182 ret = init_memory_definitions(ivrs_base); 3183 if (ret) 3184 goto out; 3185 3186 /* init the device table */ 3187 init_device_table(); 3188 3189 out: 3190 /* Don't leak any ACPI memory */ 3191 acpi_put_table(ivrs_base); 3192 3193 return ret; 3194 } 3195 3196 static int amd_iommu_enable_interrupts(void) 3197 { 3198 struct amd_iommu *iommu; 3199 int ret = 0; 3200 3201 for_each_iommu(iommu) { 3202 ret = iommu_init_irq(iommu); 3203 if (ret) 3204 goto out; 3205 } 3206 3207 out: 3208 return ret; 3209 } 3210 3211 static bool __init detect_ivrs(void) 3212 { 3213 struct acpi_table_header *ivrs_base; 3214 acpi_status status; 3215 int i; 3216 3217 status = acpi_get_table("IVRS", 0, &ivrs_base); 3218 if (status == AE_NOT_FOUND) 3219 return false; 3220 else if (ACPI_FAILURE(status)) { 3221 const char *err = acpi_format_exception(status); 3222 pr_err("IVRS table error: %s\n", err); 3223 return false; 3224 } 3225 3226 acpi_put_table(ivrs_base); 3227 3228 if (amd_iommu_force_enable) 3229 goto out; 3230 3231 /* Don't use IOMMU if there is Stoney Ridge graphics */ 3232 for (i = 0; i < 32; i++) { 3233 u32 pci_id; 3234 3235 pci_id = read_pci_config(0, i, 0, 0); 3236 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { 3237 pr_info("Disable IOMMU on Stoney Ridge\n"); 3238 return false; 3239 } 3240 } 3241 3242 out: 3243 /* Make sure ACS will be enabled during PCI probe */ 3244 pci_request_acs(); 3245 3246 return true; 3247 } 3248 3249 /**************************************************************************** 3250 * 3251 * AMD IOMMU Initialization State Machine 3252 * 3253 ****************************************************************************/ 3254 3255 static int __init state_next(void) 3256 { 3257 int ret = 0; 3258 3259 switch (init_state) { 3260 case IOMMU_START_STATE: 3261 if (!detect_ivrs()) { 3262 init_state = IOMMU_NOT_FOUND; 3263 ret = -ENODEV; 3264 } else { 3265 init_state = IOMMU_IVRS_DETECTED; 3266 } 3267 break; 3268 case IOMMU_IVRS_DETECTED: 3269 if (amd_iommu_disabled) { 3270 init_state = IOMMU_CMDLINE_DISABLED; 3271 ret = -EINVAL; 3272 } else { 3273 ret = early_amd_iommu_init(); 3274 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 3275 } 3276 break; 3277 case IOMMU_ACPI_FINISHED: 3278 early_enable_iommus(); 3279 x86_platform.iommu_shutdown = disable_iommus; 3280 init_state = IOMMU_ENABLED; 3281 break; 3282 case IOMMU_ENABLED: 3283 register_syscore_ops(&amd_iommu_syscore_ops); 3284 ret = amd_iommu_init_pci(); 3285 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 3286 enable_iommus_vapic(); 3287 enable_iommus_v2(); 3288 break; 3289 case IOMMU_PCI_INIT: 3290 ret = amd_iommu_enable_interrupts(); 3291 init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 3292 break; 3293 case IOMMU_INTERRUPTS_EN: 3294 init_state = IOMMU_INITIALIZED; 3295 break; 3296 case IOMMU_INITIALIZED: 3297 /* Nothing to do */ 3298 break; 3299 case IOMMU_NOT_FOUND: 3300 case IOMMU_INIT_ERROR: 3301 case IOMMU_CMDLINE_DISABLED: 3302 /* Error states => do nothing */ 3303 ret = -EINVAL; 3304 break; 3305 default: 3306 /* Unknown state */ 3307 BUG(); 3308 } 3309 3310 if (ret) { 3311 free_dma_resources(); 3312 if (!irq_remapping_enabled) { 3313 disable_iommus(); 3314 free_iommu_resources(); 3315 } else { 3316 struct amd_iommu *iommu; 3317 struct amd_iommu_pci_seg *pci_seg; 3318 3319 for_each_pci_segment(pci_seg) 3320 uninit_device_table_dma(pci_seg); 3321 3322 for_each_iommu(iommu) 3323 iommu_flush_all_caches(iommu); 3324 } 3325 } 3326 return ret; 3327 } 3328 3329 static int __init iommu_go_to_state(enum iommu_init_state state) 3330 { 3331 int ret = -EINVAL; 3332 3333 while (init_state != state) { 3334 if (init_state == IOMMU_NOT_FOUND || 3335 init_state == IOMMU_INIT_ERROR || 3336 init_state == IOMMU_CMDLINE_DISABLED) 3337 break; 3338 ret = state_next(); 3339 } 3340 3341 return ret; 3342 } 3343 3344 #ifdef CONFIG_IRQ_REMAP 3345 int __init amd_iommu_prepare(void) 3346 { 3347 int ret; 3348 3349 amd_iommu_irq_remap = true; 3350 3351 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 3352 if (ret) { 3353 amd_iommu_irq_remap = false; 3354 return ret; 3355 } 3356 3357 return amd_iommu_irq_remap ? 0 : -ENODEV; 3358 } 3359 3360 int __init amd_iommu_enable(void) 3361 { 3362 int ret; 3363 3364 ret = iommu_go_to_state(IOMMU_ENABLED); 3365 if (ret) 3366 return ret; 3367 3368 irq_remapping_enabled = 1; 3369 return amd_iommu_xt_mode; 3370 } 3371 3372 void amd_iommu_disable(void) 3373 { 3374 amd_iommu_suspend(); 3375 } 3376 3377 int amd_iommu_reenable(int mode) 3378 { 3379 amd_iommu_resume(); 3380 3381 return 0; 3382 } 3383 3384 int __init amd_iommu_enable_faulting(void) 3385 { 3386 /* We enable MSI later when PCI is initialized */ 3387 return 0; 3388 } 3389 #endif 3390 3391 /* 3392 * This is the core init function for AMD IOMMU hardware in the system. 3393 * This function is called from the generic x86 DMA layer initialization 3394 * code. 3395 */ 3396 static int __init amd_iommu_init(void) 3397 { 3398 struct amd_iommu *iommu; 3399 int ret; 3400 3401 ret = iommu_go_to_state(IOMMU_INITIALIZED); 3402 #ifdef CONFIG_GART_IOMMU 3403 if (ret && list_empty(&amd_iommu_list)) { 3404 /* 3405 * We failed to initialize the AMD IOMMU - try fallback 3406 * to GART if possible. 3407 */ 3408 gart_iommu_init(); 3409 } 3410 #endif 3411 3412 for_each_iommu(iommu) 3413 amd_iommu_debugfs_setup(iommu); 3414 3415 return ret; 3416 } 3417 3418 static bool amd_iommu_sme_check(void) 3419 { 3420 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || 3421 (boot_cpu_data.x86 != 0x17)) 3422 return true; 3423 3424 /* For Fam17h, a specific level of support is required */ 3425 if (boot_cpu_data.microcode >= 0x08001205) 3426 return true; 3427 3428 if ((boot_cpu_data.microcode >= 0x08001126) && 3429 (boot_cpu_data.microcode <= 0x080011ff)) 3430 return true; 3431 3432 pr_notice("IOMMU not currently supported when SME is active\n"); 3433 3434 return false; 3435 } 3436 3437 /**************************************************************************** 3438 * 3439 * Early detect code. This code runs at IOMMU detection time in the DMA 3440 * layer. 
It just looks if there is an IVRS ACPI table to detect AMD 3441 * IOMMUs 3442 * 3443 ****************************************************************************/ 3444 int __init amd_iommu_detect(void) 3445 { 3446 int ret; 3447 3448 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 3449 return -ENODEV; 3450 3451 if (!amd_iommu_sme_check()) 3452 return -ENODEV; 3453 3454 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 3455 if (ret) 3456 return ret; 3457 3458 amd_iommu_detected = true; 3459 iommu_detected = 1; 3460 x86_init.iommu.iommu_init = amd_iommu_init; 3461 3462 return 1; 3463 } 3464 3465 /**************************************************************************** 3466 * 3467 * Parsing functions for the AMD IOMMU specific kernel command line 3468 * options. 3469 * 3470 ****************************************************************************/ 3471 3472 static int __init parse_amd_iommu_dump(char *str) 3473 { 3474 amd_iommu_dump = true; 3475 3476 return 1; 3477 } 3478 3479 static int __init parse_amd_iommu_intr(char *str) 3480 { 3481 for (; *str; ++str) { 3482 if (strncmp(str, "legacy", 6) == 0) { 3483 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 3484 break; 3485 } 3486 if (strncmp(str, "vapic", 5) == 0) { 3487 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 3488 break; 3489 } 3490 } 3491 return 1; 3492 } 3493 3494 static int __init parse_amd_iommu_options(char *str) 3495 { 3496 if (!str) 3497 return -EINVAL; 3498 3499 while (*str) { 3500 if (strncmp(str, "fullflush", 9) == 0) { 3501 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); 3502 iommu_set_dma_strict(); 3503 } else if (strncmp(str, "force_enable", 12) == 0) { 3504 amd_iommu_force_enable = true; 3505 } else if (strncmp(str, "off", 3) == 0) { 3506 amd_iommu_disabled = true; 3507 } else if (strncmp(str, "force_isolation", 15) == 0) { 3508 amd_iommu_force_isolation = true; 3509 } else if (strncmp(str, "pgtbl_v1", 8) == 0) { 3510 amd_iommu_pgtable = AMD_IOMMU_V1; 3511 } else if (strncmp(str, "pgtbl_v2", 8) == 0) { 3512 amd_iommu_pgtable = AMD_IOMMU_V2; 3513 } else if (strncmp(str, "irtcachedis", 11) == 0) { 3514 amd_iommu_irtcachedis = true; 3515 } else { 3516 pr_notice("Unknown option - '%s'\n", str); 3517 } 3518 3519 str += strcspn(str, ","); 3520 while (*str == ',') 3521 str++; 3522 } 3523 3524 return 1; 3525 } 3526 3527 static int __init parse_ivrs_ioapic(char *str) 3528 { 3529 u32 seg = 0, bus, dev, fn; 3530 int id, i; 3531 u32 devid; 3532 3533 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3534 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) 3535 goto found; 3536 3537 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3538 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { 3539 pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", 3540 str, id, seg, bus, dev, fn); 3541 goto found; 3542 } 3543 3544 pr_err("Invalid command line: ivrs_ioapic%s\n", str); 3545 return 1; 3546 3547 found: 3548 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 3549 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 3550 str); 3551 return 1; 3552 } 3553 3554 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3555 3556 cmdline_maps = true; 3557 i = early_ioapic_map_size++; 3558 early_ioapic_map[i].id = id; 3559 early_ioapic_map[i].devid = devid; 3560 early_ioapic_map[i].cmd_line = true; 3561 3562 return 1; 3563 } 3564 3565 static int __init parse_ivrs_hpet(char *str) 3566 { 3567 u32 seg = 0, bus, dev, fn; 3568 
int id, i; 3569 u32 devid; 3570 3571 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3572 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) 3573 goto found; 3574 3575 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3576 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { 3577 pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", 3578 str, id, seg, bus, dev, fn); 3579 goto found; 3580 } 3581 3582 pr_err("Invalid command line: ivrs_hpet%s\n", str); 3583 return 1; 3584 3585 found: 3586 if (early_hpet_map_size == EARLY_MAP_SIZE) { 3587 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", 3588 str); 3589 return 1; 3590 } 3591 3592 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3593 3594 cmdline_maps = true; 3595 i = early_hpet_map_size++; 3596 early_hpet_map[i].id = id; 3597 early_hpet_map[i].devid = devid; 3598 early_hpet_map[i].cmd_line = true; 3599 3600 return 1; 3601 } 3602 3603 #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN) 3604 3605 static int __init parse_ivrs_acpihid(char *str) 3606 { 3607 u32 seg = 0, bus, dev, fn; 3608 char *hid, *uid, *p, *addr; 3609 char acpiid[ACPIID_LEN] = {0}; 3610 int i; 3611 3612 addr = strchr(str, '@'); 3613 if (!addr) { 3614 addr = strchr(str, '='); 3615 if (!addr) 3616 goto not_found; 3617 3618 ++addr; 3619 3620 if (strlen(addr) > ACPIID_LEN) 3621 goto not_found; 3622 3623 if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || 3624 sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { 3625 pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", 3626 str, acpiid, seg, bus, dev, fn); 3627 goto found; 3628 } 3629 goto not_found; 3630 } 3631 3632 /* We have the '@', make it the terminator to get just the acpiid */ 3633 *addr++ = 0; 3634 3635 if (strlen(str) > ACPIID_LEN + 1) 3636 goto not_found; 3637 3638 if (sscanf(str, "=%s", acpiid) != 1) 3639 goto not_found; 3640 3641 if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || 3642 sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) 3643 goto found; 3644 3645 not_found: 3646 pr_err("Invalid command line: ivrs_acpihid%s\n", str); 3647 return 1; 3648 3649 found: 3650 p = acpiid; 3651 hid = strsep(&p, ":"); 3652 uid = p; 3653 3654 if (!hid || !(*hid) || !uid) { 3655 pr_err("Invalid command line: hid or uid\n"); 3656 return 1; 3657 } 3658 3659 /* 3660 * Ignore leading zeroes after ':', so e.g., AMDI0095:00 3661 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match 3662 */ 3663 while (*uid == '0' && *(uid + 1)) 3664 uid++; 3665 3666 i = early_acpihid_map_size++; 3667 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 3668 memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); 3669 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3670 early_acpihid_map[i].cmd_line = true; 3671 3672 return 1; 3673 } 3674 3675 __setup("amd_iommu_dump", parse_amd_iommu_dump); 3676 __setup("amd_iommu=", parse_amd_iommu_options); 3677 __setup("amd_iommu_intr=", parse_amd_iommu_intr); 3678 __setup("ivrs_ioapic", parse_ivrs_ioapic); 3679 __setup("ivrs_hpet", parse_ivrs_hpet); 3680 __setup("ivrs_acpihid", parse_ivrs_acpihid); 3681 3682 bool amd_iommu_v2_supported(void) 3683 { 3684 /* CPU page table size should match IOMMU guest page table size */ 3685 if (cpu_feature_enabled(X86_FEATURE_LA57) && 3686 amd_iommu_gpt_level != PAGE_MODE_5_LEVEL) 3687 return false; 3688 3689 /* 3690 * Since DTE[Mode]=0 is prohibited on SNP-enabled 
system 3691 * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without 3692 * setting up IOMMUv1 page table. 3693 */ 3694 return amd_iommu_v2_present && !amd_iommu_snp_en; 3695 } 3696 EXPORT_SYMBOL(amd_iommu_v2_supported); 3697 3698 struct amd_iommu *get_amd_iommu(unsigned int idx) 3699 { 3700 unsigned int i = 0; 3701 struct amd_iommu *iommu; 3702 3703 for_each_iommu(iommu) 3704 if (i++ == idx) 3705 return iommu; 3706 return NULL; 3707 } 3708 3709 /**************************************************************************** 3710 * 3711 * IOMMU EFR Performance Counter support functionality. This code allows 3712 * access to the IOMMU PC functionality. 3713 * 3714 ****************************************************************************/ 3715 3716 u8 amd_iommu_pc_get_max_banks(unsigned int idx) 3717 { 3718 struct amd_iommu *iommu = get_amd_iommu(idx); 3719 3720 if (iommu) 3721 return iommu->max_banks; 3722 3723 return 0; 3724 } 3725 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); 3726 3727 bool amd_iommu_pc_supported(void) 3728 { 3729 return amd_iommu_pc_present; 3730 } 3731 EXPORT_SYMBOL(amd_iommu_pc_supported); 3732 3733 u8 amd_iommu_pc_get_max_counters(unsigned int idx) 3734 { 3735 struct amd_iommu *iommu = get_amd_iommu(idx); 3736 3737 if (iommu) 3738 return iommu->max_counters; 3739 3740 return 0; 3741 } 3742 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 3743 3744 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, 3745 u8 fxn, u64 *value, bool is_write) 3746 { 3747 u32 offset; 3748 u32 max_offset_lim; 3749 3750 /* Make sure the IOMMU PC resource is available */ 3751 if (!amd_iommu_pc_present) 3752 return -ENODEV; 3753 3754 /* Check for valid iommu and pc register indexing */ 3755 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) 3756 return -ENODEV; 3757 3758 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); 3759 3760 /* Limit the offset to the hw defined mmio region aperture */ 3761 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | 3762 (iommu->max_counters << 8) | 0x28); 3763 if ((offset < MMIO_CNTR_REG_OFFSET) || 3764 (offset > max_offset_lim)) 3765 return -EINVAL; 3766 3767 if (is_write) { 3768 u64 val = *value & GENMASK_ULL(47, 0); 3769 3770 writel((u32)val, iommu->mmio_base + offset); 3771 writel((val >> 32), iommu->mmio_base + offset + 4); 3772 } else { 3773 *value = readl(iommu->mmio_base + offset + 4); 3774 *value <<= 32; 3775 *value |= readl(iommu->mmio_base + offset); 3776 *value &= GENMASK_ULL(47, 0); 3777 } 3778 3779 return 0; 3780 } 3781 3782 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3783 { 3784 if (!iommu) 3785 return -EINVAL; 3786 3787 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); 3788 } 3789 3790 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3791 { 3792 if (!iommu) 3793 return -EINVAL; 3794 3795 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); 3796 } 3797 3798 #ifdef CONFIG_AMD_MEM_ENCRYPT 3799 int amd_iommu_snp_enable(void) 3800 { 3801 /* 3802 * The SNP support requires that IOMMU must be enabled, and is 3803 * not configured in the passthrough mode. 
3804 	 */
3805 	if (no_iommu || iommu_default_passthrough()) {
3806 		pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported\n");
3807 		return -EINVAL;
3808 	}
3809
3810 	/*
3811 	 * Prevent enabling SNP after the IOMMU_ENABLED state because doing so
3812 	 * affects how the IOMMU driver sets up its data structures and
3813 	 * configures the IOMMU hardware.
3814 	 */
3815 	if (init_state > IOMMU_ENABLED) {
3816 		pr_err("SNP: Too late to enable SNP for IOMMU.\n");
3817 		return -EINVAL;
3818 	}
3819
3820 	amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
3821 	if (!amd_iommu_snp_en)
3822 		return -EINVAL;
3823
3824 	pr_info("SNP enabled\n");
3825
3826 	/* Enforce IOMMU v1 pagetable when SNP is enabled. */
3827 	if (amd_iommu_pgtable != AMD_IOMMU_V1) {
3828 		pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP\n");
3829 		amd_iommu_pgtable = AMD_IOMMU_V1;
3830 	}
3831
3832 	return 0;
3833 }
3834 #endif
3835
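/*
 * Illustrative examples of the kernel command-line options parsed above.
 * The option names and formats come from the parse_* functions in this file;
 * the device addresses and IDs shown are hypothetical and only demonstrate
 * the expected syntax:
 *
 *   amd_iommu=force_isolation,pgtbl_v2
 *   amd_iommu_intr=vapic
 *   ivrs_ioapic=32@0000:00:14.0
 *   ivrs_hpet=0@0000:00:14.0
 *   ivrs_acpihid=AMDI0020:0@0000:00:13.1
 */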