1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <jroedel@suse.de> 5 * Leo Duran <leo.duran@amd.com> 6 */ 7 8 #define pr_fmt(fmt) "AMD-Vi: " fmt 9 #define dev_fmt(fmt) pr_fmt(fmt) 10 11 #include <linux/pci.h> 12 #include <linux/acpi.h> 13 #include <linux/list.h> 14 #include <linux/bitmap.h> 15 #include <linux/slab.h> 16 #include <linux/syscore_ops.h> 17 #include <linux/interrupt.h> 18 #include <linux/msi.h> 19 #include <linux/irq.h> 20 #include <linux/amd-iommu.h> 21 #include <linux/export.h> 22 #include <linux/kmemleak.h> 23 #include <linux/cc_platform.h> 24 #include <linux/iopoll.h> 25 #include <asm/pci-direct.h> 26 #include <asm/iommu.h> 27 #include <asm/apic.h> 28 #include <asm/gart.h> 29 #include <asm/x86_init.h> 30 #include <asm/io_apic.h> 31 #include <asm/irq_remapping.h> 32 #include <asm/set_memory.h> 33 34 #include <linux/crash_dump.h> 35 36 #include "amd_iommu.h" 37 #include "../irq_remapping.h" 38 39 /* 40 * definitions for the ACPI scanning code 41 */ 42 #define IVRS_HEADER_LENGTH 48 43 44 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 45 #define ACPI_IVMD_TYPE_ALL 0x20 46 #define ACPI_IVMD_TYPE 0x21 47 #define ACPI_IVMD_TYPE_RANGE 0x22 48 49 #define IVHD_DEV_ALL 0x01 50 #define IVHD_DEV_SELECT 0x02 51 #define IVHD_DEV_SELECT_RANGE_START 0x03 52 #define IVHD_DEV_RANGE_END 0x04 53 #define IVHD_DEV_ALIAS 0x42 54 #define IVHD_DEV_ALIAS_RANGE 0x43 55 #define IVHD_DEV_EXT_SELECT 0x46 56 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 57 #define IVHD_DEV_SPECIAL 0x48 58 #define IVHD_DEV_ACPI_HID 0xf0 59 60 #define UID_NOT_PRESENT 0 61 #define UID_IS_INTEGER 1 62 #define UID_IS_CHARACTER 2 63 64 #define IVHD_SPECIAL_IOAPIC 1 65 #define IVHD_SPECIAL_HPET 2 66 67 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 68 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 69 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 70 #define IVHD_FLAG_ISOC_EN_MASK 0x08 71 72 #define IVMD_FLAG_EXCL_RANGE 0x08 73 #define IVMD_FLAG_IW 0x04 74 #define IVMD_FLAG_IR 0x02 75 #define IVMD_FLAG_UNITY_MAP 0x01 76 77 #define ACPI_DEVFLAG_INITPASS 0x01 78 #define ACPI_DEVFLAG_EXTINT 0x02 79 #define ACPI_DEVFLAG_NMI 0x04 80 #define ACPI_DEVFLAG_SYSMGT1 0x10 81 #define ACPI_DEVFLAG_SYSMGT2 0x20 82 #define ACPI_DEVFLAG_LINT0 0x40 83 #define ACPI_DEVFLAG_LINT1 0x80 84 #define ACPI_DEVFLAG_ATSDIS 0x10000000 85 86 #define LOOP_TIMEOUT 2000000 87 88 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \ 89 | ((dev & 0x1f) << 3) | (fn & 0x7)) 90 91 /* 92 * ACPI table definitions 93 * 94 * These data structures are laid over the table to parse the important values 95 * out of it. 96 */ 97 98 /* 99 * structure describing one IOMMU in the ACPI table. Typically followed by one 100 * or more ivhd_entrys. 101 */ 102 struct ivhd_header { 103 u8 type; 104 u8 flags; 105 u16 length; 106 u16 devid; 107 u16 cap_ptr; 108 u64 mmio_phys; 109 u16 pci_seg; 110 u16 info; 111 u32 efr_attr; 112 113 /* Following only valid on IVHD type 11h and 40h */ 114 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ 115 u64 efr_reg2; 116 } __attribute__((packed)); 117 118 /* 119 * A device entry describing which devices a specific IOMMU translates and 120 * which requestor ids they use. 121 */ 122 struct ivhd_entry { 123 u8 type; 124 u16 devid; 125 u8 flags; 126 struct_group(ext_hid, 127 u32 ext; 128 u32 hidh; 129 ); 130 u64 cid; 131 u8 uidf; 132 u8 uidl; 133 u8 uid; 134 } __attribute__((packed)); 135 136 /* 137 * An AMD IOMMU memory definition structure. 
It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u16 pci_seg;
	u8 resv[6];
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static bool amd_iommu_irtcachedis;
static int amd_iommu_target_ivhd_type;

/* Global EFR and EFR2 registers */
u64 amd_iommu_efr;
u64 amd_iommu_efr2;

/* Whether SNP is enabled on the system */
bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);

LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* Do the IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;
bool amdr_ivrs_remap_support __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * used to track which of them are already in use.
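 * Note: bit 0 is reserved early during init; domain ID 0 serves as the
 * "no domain assigned" placeholder and is never handed out.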
201 */ 202 unsigned long *amd_iommu_pd_alloc_bitmap; 203 204 enum iommu_init_state { 205 IOMMU_START_STATE, 206 IOMMU_IVRS_DETECTED, 207 IOMMU_ACPI_FINISHED, 208 IOMMU_ENABLED, 209 IOMMU_PCI_INIT, 210 IOMMU_INTERRUPTS_EN, 211 IOMMU_INITIALIZED, 212 IOMMU_NOT_FOUND, 213 IOMMU_INIT_ERROR, 214 IOMMU_CMDLINE_DISABLED, 215 }; 216 217 /* Early ioapic and hpet maps from kernel command line */ 218 #define EARLY_MAP_SIZE 4 219 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; 220 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; 221 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE]; 222 223 static int __initdata early_ioapic_map_size; 224 static int __initdata early_hpet_map_size; 225 static int __initdata early_acpihid_map_size; 226 227 static bool __initdata cmdline_maps; 228 229 static enum iommu_init_state init_state = IOMMU_START_STATE; 230 231 static int amd_iommu_enable_interrupts(void); 232 static int __init iommu_go_to_state(enum iommu_init_state state); 233 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg); 234 235 static bool amd_iommu_pre_enabled = true; 236 237 static u32 amd_iommu_ivinfo __initdata; 238 239 bool translation_pre_enabled(struct amd_iommu *iommu) 240 { 241 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); 242 } 243 244 static void clear_translation_pre_enabled(struct amd_iommu *iommu) 245 { 246 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 247 } 248 249 static void init_translation_status(struct amd_iommu *iommu) 250 { 251 u64 ctrl; 252 253 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 254 if (ctrl & (1<<CONTROL_IOMMU_EN)) 255 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 256 } 257 258 static inline unsigned long tbl_size(int entry_size, int last_bdf) 259 { 260 unsigned shift = PAGE_SHIFT + 261 get_order((last_bdf + 1) * entry_size); 262 263 return 1UL << shift; 264 } 265 266 int amd_iommu_get_num_iommus(void) 267 { 268 return amd_iommus_present; 269 } 270 271 /* 272 * Iterate through all the IOMMUs to get common EFR 273 * masks among all IOMMUs and warn if found inconsistency. 274 */ 275 static void get_global_efr(void) 276 { 277 struct amd_iommu *iommu; 278 279 for_each_iommu(iommu) { 280 u64 tmp = iommu->features; 281 u64 tmp2 = iommu->features2; 282 283 if (list_is_first(&iommu->list, &amd_iommu_list)) { 284 amd_iommu_efr = tmp; 285 amd_iommu_efr2 = tmp2; 286 continue; 287 } 288 289 if (amd_iommu_efr == tmp && 290 amd_iommu_efr2 == tmp2) 291 continue; 292 293 pr_err(FW_BUG 294 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", 295 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2, 296 iommu->index, iommu->pci_seg->id, 297 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), 298 PCI_FUNC(iommu->devid)); 299 300 amd_iommu_efr &= tmp; 301 amd_iommu_efr2 &= tmp2; 302 } 303 304 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2); 305 } 306 307 static bool check_feature_on_all_iommus(u64 mask) 308 { 309 return !!(amd_iommu_efr & mask); 310 } 311 312 static inline int check_feature_gpt_level(void) 313 { 314 return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK); 315 } 316 317 /* 318 * For IVHD type 0x11/0x40, EFR is also available via IVHD. 319 * Default to IVHD EFR since it is available sooner 320 * (i.e. before PCI init). 
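 * The EFR value read from MMIO later on is cross-checked against this
 * copy in late_iommu_features_init(), and a FW_WARN is emitted on
 * mismatch.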
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{
	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
		iommu->features = h->efr_reg;
		iommu->features2 = h->efr_reg2;
	}
	if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
		amdr_ivrs_remap_support = true;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * the MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{
	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
	u64 entry = start & PM_ADDR_MASK;

	if (!check_feature_on_all_iommus(FEATURE_SNP))
		return;

	/* Note:
	 * Re-purpose Exclusion base/limit registers for Completion wait
	 * write-back base/limit.
	 */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	/* Note:
	 * Default to 4 Kbytes, which can be specified by setting base
	 * address equal to the limit address.
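	 * When SNP is active the hardware is only allowed to write
	 * completion-wait semaphores inside this window, which is why
	 * cmd_sem is backed by its own 4K page (see alloc_cwwb_sem()).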
415 */ 416 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 417 &entry, sizeof(entry)); 418 } 419 420 /* Programs the physical address of the device table into the IOMMU hardware */ 421 static void iommu_set_device_table(struct amd_iommu *iommu) 422 { 423 u64 entry; 424 u32 dev_table_size = iommu->pci_seg->dev_table_size; 425 void *dev_table = (void *)get_dev_table(iommu); 426 427 BUG_ON(iommu->mmio_base == NULL); 428 429 entry = iommu_virt_to_phys(dev_table); 430 entry |= (dev_table_size >> 12) - 1; 431 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, 432 &entry, sizeof(entry)); 433 } 434 435 /* Generic functions to enable/disable certain features of the IOMMU. */ 436 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) 437 { 438 u64 ctrl; 439 440 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 441 ctrl |= (1ULL << bit); 442 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 443 } 444 445 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) 446 { 447 u64 ctrl; 448 449 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 450 ctrl &= ~(1ULL << bit); 451 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 452 } 453 454 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) 455 { 456 u64 ctrl; 457 458 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 459 ctrl &= ~CTRL_INV_TO_MASK; 460 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; 461 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 462 } 463 464 /* Function to enable the hardware */ 465 static void iommu_enable(struct amd_iommu *iommu) 466 { 467 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 468 } 469 470 static void iommu_disable(struct amd_iommu *iommu) 471 { 472 if (!iommu->mmio_base) 473 return; 474 475 /* Disable command buffer */ 476 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 477 478 /* Disable event logging and event interrupts */ 479 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); 480 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 481 482 /* Disable IOMMU GA_LOG */ 483 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 484 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 485 486 /* Disable IOMMU hardware itself */ 487 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 488 489 /* Clear IRTE cache disabling bit */ 490 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); 491 } 492 493 /* 494 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 495 * the system has one. 496 */ 497 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) 498 { 499 if (!request_mem_region(address, end, "amd_iommu")) { 500 pr_err("Can not reserve memory region %llx-%llx for mmio\n", 501 address, end); 502 pr_err("This is a BIOS bug. Please contact your hardware vendor\n"); 503 return NULL; 504 } 505 506 return (u8 __iomem *)ioremap(address, end); 507 } 508 509 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) 510 { 511 if (iommu->mmio_base) 512 iounmap(iommu->mmio_base); 513 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); 514 } 515 516 static inline u32 get_ivhd_header_size(struct ivhd_header *h) 517 { 518 u32 size = 0; 519 520 switch (h->type) { 521 case 0x10: 522 size = 24; 523 break; 524 case 0x11: 525 case 0x40: 526 size = 40; 527 break; 528 } 529 return size; 530 } 531 532 /**************************************************************************** 533 * 534 * The functions below belong to the first pass of AMD IOMMU ACPI table 535 * parsing. 
In this pass we try to find out the highest device id this 536 * code has to handle. Upon this information the size of the shared data 537 * structures is determined later. 538 * 539 ****************************************************************************/ 540 541 /* 542 * This function calculates the length of a given IVHD entry 543 */ 544 static inline int ivhd_entry_length(u8 *ivhd) 545 { 546 u32 type = ((struct ivhd_entry *)ivhd)->type; 547 548 if (type < 0x80) { 549 return 0x04 << (*ivhd >> 6); 550 } else if (type == IVHD_DEV_ACPI_HID) { 551 /* For ACPI_HID, offset 21 is uid len */ 552 return *((u8 *)ivhd + 21) + 22; 553 } 554 return 0; 555 } 556 557 /* 558 * After reading the highest device id from the IOMMU PCI capability header 559 * this function looks if there is a higher device id defined in the ACPI table 560 */ 561 static int __init find_last_devid_from_ivhd(struct ivhd_header *h) 562 { 563 u8 *p = (void *)h, *end = (void *)h; 564 struct ivhd_entry *dev; 565 int last_devid = -EINVAL; 566 567 u32 ivhd_size = get_ivhd_header_size(h); 568 569 if (!ivhd_size) { 570 pr_err("Unsupported IVHD type %#x\n", h->type); 571 return -EINVAL; 572 } 573 574 p += ivhd_size; 575 end += h->length; 576 577 while (p < end) { 578 dev = (struct ivhd_entry *)p; 579 switch (dev->type) { 580 case IVHD_DEV_ALL: 581 /* Use maximum BDF value for DEV_ALL */ 582 return 0xffff; 583 case IVHD_DEV_SELECT: 584 case IVHD_DEV_RANGE_END: 585 case IVHD_DEV_ALIAS: 586 case IVHD_DEV_EXT_SELECT: 587 /* all the above subfield types refer to device ids */ 588 if (dev->devid > last_devid) 589 last_devid = dev->devid; 590 break; 591 default: 592 break; 593 } 594 p += ivhd_entry_length(p); 595 } 596 597 WARN_ON(p != end); 598 599 return last_devid; 600 } 601 602 static int __init check_ivrs_checksum(struct acpi_table_header *table) 603 { 604 int i; 605 u8 checksum = 0, *p = (u8 *)table; 606 607 for (i = 0; i < table->length; ++i) 608 checksum += p[i]; 609 if (checksum != 0) { 610 /* ACPI table corrupt */ 611 pr_err(FW_BUG "IVRS invalid checksum\n"); 612 return -ENODEV; 613 } 614 615 return 0; 616 } 617 618 /* 619 * Iterate over all IVHD entries in the ACPI table and find the highest device 620 * id which we need to handle. This is the first of three functions which parse 621 * the ACPI table. So we check the checksum here. 622 */ 623 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) 624 { 625 u8 *p = (u8 *)table, *end = (u8 *)table; 626 struct ivhd_header *h; 627 int last_devid, last_bdf = 0; 628 629 p += IVRS_HEADER_LENGTH; 630 631 end += table->length; 632 while (p < end) { 633 h = (struct ivhd_header *)p; 634 if (h->pci_seg == pci_seg && 635 h->type == amd_iommu_target_ivhd_type) { 636 last_devid = find_last_devid_from_ivhd(h); 637 638 if (last_devid < 0) 639 return -EINVAL; 640 if (last_devid > last_bdf) 641 last_bdf = last_devid; 642 } 643 p += h->length; 644 } 645 WARN_ON(p != end); 646 647 return last_bdf; 648 } 649 650 /**************************************************************************** 651 * 652 * The following functions belong to the code path which parses the ACPI table 653 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 654 * data structures, initialize the per PCI segment device/alias/rlookup table 655 * and also basically initialize the hardware. 
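 * The allocation helpers below size each table from the
 * pci_seg->*_table_size values, which alloc_pci_segment() computes from
 * the last_bdf found in pass one.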
 *
 ****************************************************************************/

/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
	pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
						      get_order(pci_seg->dev_table_size));
	if (!pci_seg->dev_table)
		return -ENOMEM;

	return 0;
}

static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
	free_pages((unsigned long)pci_seg->dev_table,
		   get_order(pci_seg->dev_table_size));
	pci_seg->dev_table = NULL;
}

/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	pci_seg->rlookup_table = (void *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO,
						get_order(pci_seg->rlookup_table_size));
	if (pci_seg->rlookup_table == NULL)
		return -ENOMEM;

	return 0;
}

static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	free_pages((unsigned long)pci_seg->rlookup_table,
		   get_order(pci_seg->rlookup_table_size));
	pci_seg->rlookup_table = NULL;
}

static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	pci_seg->irq_lookup_table = (void *)__get_free_pages(
					     GFP_KERNEL | __GFP_ZERO,
					     get_order(pci_seg->rlookup_table_size));
	kmemleak_alloc(pci_seg->irq_lookup_table,
		       pci_seg->rlookup_table_size, 1, GFP_KERNEL);
	if (pci_seg->irq_lookup_table == NULL)
		return -ENOMEM;

	return 0;
}

static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
	kmemleak_free(pci_seg->irq_lookup_table);
	free_pages((unsigned long)pci_seg->irq_lookup_table,
		   get_order(pci_seg->rlookup_table_size));
	pci_seg->irq_lookup_table = NULL;
}

static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
	int i;

	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
							get_order(pci_seg->alias_table_size));
	if (!pci_seg->alias_table)
		return -ENOMEM;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= pci_seg->last_bdf; ++i)
		pci_seg->alias_table[i] = i;

	return 0;
}

static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
	free_pages((unsigned long)pci_seg->alias_table,
		   get_order(pci_seg->alias_table_size));
	pci_seg->alias_table = NULL;
}

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function restarts event logging in case the IOMMU experienced
 * an event log buffer overflow.
 */
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
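 * (This can happen, for example, when the hardware reports an illegal
 * command and halts command processing.)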
 */
static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    check_feature_on_all_iommus(FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log PPR requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ?
0 : -ENOMEM; 876 } 877 878 static void iommu_enable_ppr_log(struct amd_iommu *iommu) 879 { 880 u64 entry; 881 882 if (iommu->ppr_log == NULL) 883 return; 884 885 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; 886 887 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, 888 &entry, sizeof(entry)); 889 890 /* set head and tail to zero manually */ 891 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 892 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 893 894 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); 895 iommu_feature_enable(iommu, CONTROL_PPR_EN); 896 } 897 898 static void __init free_ppr_log(struct amd_iommu *iommu) 899 { 900 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); 901 } 902 903 static void free_ga_log(struct amd_iommu *iommu) 904 { 905 #ifdef CONFIG_IRQ_REMAP 906 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); 907 free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); 908 #endif 909 } 910 911 #ifdef CONFIG_IRQ_REMAP 912 static int iommu_ga_log_enable(struct amd_iommu *iommu) 913 { 914 u32 status, i; 915 u64 entry; 916 917 if (!iommu->ga_log) 918 return -EINVAL; 919 920 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 921 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 922 &entry, sizeof(entry)); 923 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & 924 (BIT_ULL(52)-1)) & ~7ULL; 925 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 926 &entry, sizeof(entry)); 927 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 928 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); 929 930 931 iommu_feature_enable(iommu, CONTROL_GAINT_EN); 932 iommu_feature_enable(iommu, CONTROL_GALOG_EN); 933 934 for (i = 0; i < LOOP_TIMEOUT; ++i) { 935 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 936 if (status & (MMIO_STATUS_GALOG_RUN_MASK)) 937 break; 938 udelay(10); 939 } 940 941 if (WARN_ON(i >= LOOP_TIMEOUT)) 942 return -EINVAL; 943 944 return 0; 945 } 946 947 static int iommu_init_ga_log(struct amd_iommu *iommu) 948 { 949 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 950 return 0; 951 952 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 953 get_order(GA_LOG_SIZE)); 954 if (!iommu->ga_log) 955 goto err_out; 956 957 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 958 get_order(8)); 959 if (!iommu->ga_log_tail) 960 goto err_out; 961 962 return 0; 963 err_out: 964 free_ga_log(iommu); 965 return -EINVAL; 966 } 967 #endif /* CONFIG_IRQ_REMAP */ 968 969 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) 970 { 971 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1); 972 973 return iommu->cmd_sem ? 0 : -ENOMEM; 974 } 975 976 static void __init free_cwwb_sem(struct amd_iommu *iommu) 977 { 978 if (iommu->cmd_sem) 979 free_page((unsigned long)iommu->cmd_sem); 980 } 981 982 static void iommu_enable_xt(struct amd_iommu *iommu) 983 { 984 #ifdef CONFIG_IRQ_REMAP 985 /* 986 * XT mode (32-bit APIC destination ID) requires 987 * GA mode (128-bit IRTE support) as a prerequisite. 988 */ 989 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && 990 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 991 iommu_feature_enable(iommu, CONTROL_XT_EN); 992 #endif /* CONFIG_IRQ_REMAP */ 993 } 994 995 static void iommu_enable_gt(struct amd_iommu *iommu) 996 { 997 if (!iommu_feature(iommu, FEATURE_GT)) 998 return; 999 1000 iommu_feature_enable(iommu, CONTROL_GT_EN); 1001 } 1002 1003 /* sets a specific bit in the device table entry. 
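 * A DTE is 256 bits wide, stored as four u64 words: bit >> 6 selects the
 * word and bit & 0x3f the position within it (e.g. bit 96 is bit 32 of
 * data[1]).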
 */
static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
				u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	dev_table[devid].data[i] |= (1UL << _bit);
}

static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	return __set_dev_entry_bit(dev_table, devid, bit);
}

static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
			       u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	return __get_dev_entry_bit(dev_table, devid, bit);
}

static bool __copy_device_table(struct amd_iommu *iommu)
{
	u64 int_ctl, int_tab_len, entry = 0;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	/* Each IOMMU uses a separate device table with the same size */
	lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
	hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
	entry = (((u64) hi) << 32) + lo;

	old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
	if (old_devtb_size != pci_seg->dev_table_size) {
		pr_err("The device table size of IOMMU:%d is not expected!\n",
			iommu->index);
		return false;
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							pci_seg->dev_table_size)
		    : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
						    get_order(pci_seg->dev_table_size));
	if (pci_seg->old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		memunmap(old_devtb);
		return false;
	}

	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
		pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_INTTABLEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				memunmap(old_devtb);
				return false;
			}

			pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}

static bool copy_device_table(void)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pci_seg *pci_seg;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");

	/*
	 * All IOMMUs within a PCI segment share a common device table.
	 * Hence copy the device table only once per PCI segment.
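	 * (The loop below therefore uses only the first IOMMU found for
	 * each segment and skips the rest.)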
1139 */ 1140 for_each_pci_segment(pci_seg) { 1141 for_each_iommu(iommu) { 1142 if (pci_seg->id != iommu->pci_seg->id) 1143 continue; 1144 if (!__copy_device_table(iommu)) 1145 return false; 1146 break; 1147 } 1148 } 1149 1150 return true; 1151 } 1152 1153 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid) 1154 { 1155 int sysmgt; 1156 1157 sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) | 1158 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1); 1159 1160 if (sysmgt == 0x01) 1161 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW); 1162 } 1163 1164 /* 1165 * This function takes the device specific flags read from the ACPI 1166 * table and sets up the device table entry with that information 1167 */ 1168 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, 1169 u16 devid, u32 flags, u32 ext_flags) 1170 { 1171 if (flags & ACPI_DEVFLAG_INITPASS) 1172 set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS); 1173 if (flags & ACPI_DEVFLAG_EXTINT) 1174 set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS); 1175 if (flags & ACPI_DEVFLAG_NMI) 1176 set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS); 1177 if (flags & ACPI_DEVFLAG_SYSMGT1) 1178 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1); 1179 if (flags & ACPI_DEVFLAG_SYSMGT2) 1180 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2); 1181 if (flags & ACPI_DEVFLAG_LINT0) 1182 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS); 1183 if (flags & ACPI_DEVFLAG_LINT1) 1184 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS); 1185 1186 amd_iommu_apply_erratum_63(iommu, devid); 1187 1188 amd_iommu_set_rlookup_table(iommu, devid); 1189 } 1190 1191 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line) 1192 { 1193 struct devid_map *entry; 1194 struct list_head *list; 1195 1196 if (type == IVHD_SPECIAL_IOAPIC) 1197 list = &ioapic_map; 1198 else if (type == IVHD_SPECIAL_HPET) 1199 list = &hpet_map; 1200 else 1201 return -EINVAL; 1202 1203 list_for_each_entry(entry, list, list) { 1204 if (!(entry->id == id && entry->cmd_line)) 1205 continue; 1206 1207 pr_info("Command-line override present for %s id %d - ignoring\n", 1208 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); 1209 1210 *devid = entry->devid; 1211 1212 return 0; 1213 } 1214 1215 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1216 if (!entry) 1217 return -ENOMEM; 1218 1219 entry->id = id; 1220 entry->devid = *devid; 1221 entry->cmd_line = cmd_line; 1222 1223 list_add_tail(&entry->list, list); 1224 1225 return 0; 1226 } 1227 1228 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid, 1229 bool cmd_line) 1230 { 1231 struct acpihid_map_entry *entry; 1232 struct list_head *list = &acpihid_map; 1233 1234 list_for_each_entry(entry, list, list) { 1235 if (strcmp(entry->hid, hid) || 1236 (*uid && *entry->uid && strcmp(entry->uid, uid)) || 1237 !entry->cmd_line) 1238 continue; 1239 1240 pr_info("Command-line override for hid:%s uid:%s\n", 1241 hid, uid); 1242 *devid = entry->devid; 1243 return 0; 1244 } 1245 1246 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1247 if (!entry) 1248 return -ENOMEM; 1249 1250 memcpy(entry->uid, uid, strlen(uid)); 1251 memcpy(entry->hid, hid, strlen(hid)); 1252 entry->devid = *devid; 1253 entry->cmd_line = cmd_line; 1254 entry->root_devid = (entry->devid & (~0x7)); 1255 1256 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n", 1257 entry->cmd_line ? 
"cmd" : "ivrs", 1258 entry->hid, entry->uid, entry->root_devid); 1259 1260 list_add_tail(&entry->list, list); 1261 return 0; 1262 } 1263 1264 static int __init add_early_maps(void) 1265 { 1266 int i, ret; 1267 1268 for (i = 0; i < early_ioapic_map_size; ++i) { 1269 ret = add_special_device(IVHD_SPECIAL_IOAPIC, 1270 early_ioapic_map[i].id, 1271 &early_ioapic_map[i].devid, 1272 early_ioapic_map[i].cmd_line); 1273 if (ret) 1274 return ret; 1275 } 1276 1277 for (i = 0; i < early_hpet_map_size; ++i) { 1278 ret = add_special_device(IVHD_SPECIAL_HPET, 1279 early_hpet_map[i].id, 1280 &early_hpet_map[i].devid, 1281 early_hpet_map[i].cmd_line); 1282 if (ret) 1283 return ret; 1284 } 1285 1286 for (i = 0; i < early_acpihid_map_size; ++i) { 1287 ret = add_acpi_hid_device(early_acpihid_map[i].hid, 1288 early_acpihid_map[i].uid, 1289 &early_acpihid_map[i].devid, 1290 early_acpihid_map[i].cmd_line); 1291 if (ret) 1292 return ret; 1293 } 1294 1295 return 0; 1296 } 1297 1298 /* 1299 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 1300 * initializes the hardware and our data structures with it. 1301 */ 1302 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, 1303 struct ivhd_header *h) 1304 { 1305 u8 *p = (u8 *)h; 1306 u8 *end = p, flags = 0; 1307 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id; 1308 u32 dev_i, ext_flags = 0; 1309 bool alias = false; 1310 struct ivhd_entry *e; 1311 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1312 u32 ivhd_size; 1313 int ret; 1314 1315 1316 ret = add_early_maps(); 1317 if (ret) 1318 return ret; 1319 1320 amd_iommu_apply_ivrs_quirks(); 1321 1322 /* 1323 * First save the recommended feature enable bits from ACPI 1324 */ 1325 iommu->acpi_flags = h->flags; 1326 1327 /* 1328 * Done. Now parse the device entries 1329 */ 1330 ivhd_size = get_ivhd_header_size(h); 1331 if (!ivhd_size) { 1332 pr_err("Unsupported IVHD type %#x\n", h->type); 1333 return -EINVAL; 1334 } 1335 1336 p += ivhd_size; 1337 1338 end += h->length; 1339 1340 1341 while (p < end) { 1342 e = (struct ivhd_entry *)p; 1343 seg_id = pci_seg->id; 1344 1345 switch (e->type) { 1346 case IVHD_DEV_ALL: 1347 1348 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); 1349 1350 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) 1351 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); 1352 break; 1353 case IVHD_DEV_SELECT: 1354 1355 DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x " 1356 "flags: %02x\n", 1357 seg_id, PCI_BUS_NUM(e->devid), 1358 PCI_SLOT(e->devid), 1359 PCI_FUNC(e->devid), 1360 e->flags); 1361 1362 devid = e->devid; 1363 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1364 break; 1365 case IVHD_DEV_SELECT_RANGE_START: 1366 1367 DUMP_printk(" DEV_SELECT_RANGE_START\t " 1368 "devid: %04x:%02x:%02x.%x flags: %02x\n", 1369 seg_id, PCI_BUS_NUM(e->devid), 1370 PCI_SLOT(e->devid), 1371 PCI_FUNC(e->devid), 1372 e->flags); 1373 1374 devid_start = e->devid; 1375 flags = e->flags; 1376 ext_flags = 0; 1377 alias = false; 1378 break; 1379 case IVHD_DEV_ALIAS: 1380 1381 DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x " 1382 "flags: %02x devid_to: %02x:%02x.%x\n", 1383 seg_id, PCI_BUS_NUM(e->devid), 1384 PCI_SLOT(e->devid), 1385 PCI_FUNC(e->devid), 1386 e->flags, 1387 PCI_BUS_NUM(e->ext >> 8), 1388 PCI_SLOT(e->ext >> 8), 1389 PCI_FUNC(e->ext >> 8)); 1390 1391 devid = e->devid; 1392 devid_to = e->ext >> 8; 1393 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); 1394 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); 1395 pci_seg->alias_table[devid] = devid_to; 1396 
break; 1397 case IVHD_DEV_ALIAS_RANGE: 1398 1399 DUMP_printk(" DEV_ALIAS_RANGE\t\t " 1400 "devid: %04x:%02x:%02x.%x flags: %02x " 1401 "devid_to: %04x:%02x:%02x.%x\n", 1402 seg_id, PCI_BUS_NUM(e->devid), 1403 PCI_SLOT(e->devid), 1404 PCI_FUNC(e->devid), 1405 e->flags, 1406 seg_id, PCI_BUS_NUM(e->ext >> 8), 1407 PCI_SLOT(e->ext >> 8), 1408 PCI_FUNC(e->ext >> 8)); 1409 1410 devid_start = e->devid; 1411 flags = e->flags; 1412 devid_to = e->ext >> 8; 1413 ext_flags = 0; 1414 alias = true; 1415 break; 1416 case IVHD_DEV_EXT_SELECT: 1417 1418 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x " 1419 "flags: %02x ext: %08x\n", 1420 seg_id, PCI_BUS_NUM(e->devid), 1421 PCI_SLOT(e->devid), 1422 PCI_FUNC(e->devid), 1423 e->flags, e->ext); 1424 1425 devid = e->devid; 1426 set_dev_entry_from_acpi(iommu, devid, e->flags, 1427 e->ext); 1428 break; 1429 case IVHD_DEV_EXT_SELECT_RANGE: 1430 1431 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " 1432 "%04x:%02x:%02x.%x flags: %02x ext: %08x\n", 1433 seg_id, PCI_BUS_NUM(e->devid), 1434 PCI_SLOT(e->devid), 1435 PCI_FUNC(e->devid), 1436 e->flags, e->ext); 1437 1438 devid_start = e->devid; 1439 flags = e->flags; 1440 ext_flags = e->ext; 1441 alias = false; 1442 break; 1443 case IVHD_DEV_RANGE_END: 1444 1445 DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n", 1446 seg_id, PCI_BUS_NUM(e->devid), 1447 PCI_SLOT(e->devid), 1448 PCI_FUNC(e->devid)); 1449 1450 devid = e->devid; 1451 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 1452 if (alias) { 1453 pci_seg->alias_table[dev_i] = devid_to; 1454 set_dev_entry_from_acpi(iommu, 1455 devid_to, flags, ext_flags); 1456 } 1457 set_dev_entry_from_acpi(iommu, dev_i, 1458 flags, ext_flags); 1459 } 1460 break; 1461 case IVHD_DEV_SPECIAL: { 1462 u8 handle, type; 1463 const char *var; 1464 u32 devid; 1465 int ret; 1466 1467 handle = e->ext & 0xff; 1468 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); 1469 type = (e->ext >> 24) & 0xff; 1470 1471 if (type == IVHD_SPECIAL_IOAPIC) 1472 var = "IOAPIC"; 1473 else if (type == IVHD_SPECIAL_HPET) 1474 var = "HPET"; 1475 else 1476 var = "UNKNOWN"; 1477 1478 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n", 1479 var, (int)handle, 1480 seg_id, PCI_BUS_NUM(devid), 1481 PCI_SLOT(devid), 1482 PCI_FUNC(devid)); 1483 1484 ret = add_special_device(type, handle, &devid, false); 1485 if (ret) 1486 return ret; 1487 1488 /* 1489 * add_special_device might update the devid in case a 1490 * command-line override is present. So call 1491 * set_dev_entry_from_acpi after add_special_device. 
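			 * (Such an override exists only if the user passed
			 * ivrs_ioapic= or ivrs_hpet= on the kernel command
			 * line; see the early_*_map arrays above.)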
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u32 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
			memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
				    hid, uid, seg_id,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_acpi_hid_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_acpi_hid_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Allocate PCI segment data structure */
static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
					struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;
	int last_bdf;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func we need to
	 * handle in this PCI segment. Based on this information the shared
	 * data structures for the PCI segments in the system will be
	 * allocated.
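	 * (tbl_size() rounds each of these tables up to a power-of-two
	 * number of pages derived from last_bdf.)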
	 */
	last_bdf = find_last_devid_acpi(ivrs_base, id);
	if (last_bdf < 0)
		return NULL;

	pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
	if (pci_seg == NULL)
		return NULL;

	pci_seg->last_bdf = last_bdf;
	DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
	pci_seg->dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
	pci_seg->alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
	pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);

	pci_seg->id = id;
	init_llist_head(&pci_seg->dev_data_list);
	INIT_LIST_HEAD(&pci_seg->unity_map);
	list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);

	if (alloc_dev_table(pci_seg))
		return NULL;
	if (alloc_alias_table(pci_seg))
		return NULL;
	if (alloc_rlookup_table(pci_seg))
		return NULL;

	return pci_seg;
}

static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
					struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id == id)
			return pci_seg;
	}

	return alloc_pci_segment(id, ivrs_base);
}

static void __init free_pci_segments(void)
{
	struct amd_iommu_pci_seg *pci_seg, *next;

	for_each_pci_segment_safe(pci_seg, next) {
		list_del(&pci_seg->list);
		free_irq_lookup_table(pci_seg);
		free_rlookup_table(pci_seg);
		free_alias_table(pci_seg);
		free_dev_table(pci_seg);
		kfree(pci_seg);
	}
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_cwwb_sem(iommu);
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
				 struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;

	pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
	if (pci_seg == NULL)
		return -ENOMEM;
	iommu->pci_seg = pci_seg;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
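		 * (A 128-bit IRTE must be updated atomically; cmpxchg16b is
		 * what the IRTE update path relies on for that.)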
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	return init_iommu_from_acpi(iommu, h);
}

static int __init init_iommu_one_late(struct amd_iommu *iommu)
{
	int ret;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	iommu->pci_seg->rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks in the IVRS table and
 * returns the highest IVHD type that matches the device ID of the first
 * block and is still supported by this driver.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	/* Phase 1: Process all IVHD blocks */
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
				    "flags: %01x info %04x\n",
				    h->pci_seg, PCI_BUS_NUM(h->devid),
				    PCI_SLOT(h->devid), PCI_FUNC(h->devid),
				    h->cap_ptr, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h, table);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	/* Phase 2 : Early feature
support check */ 1919 get_global_efr(); 1920 1921 /* Phase 3 : Enabling IOMMU features */ 1922 for_each_iommu(iommu) { 1923 ret = init_iommu_one_late(iommu); 1924 if (ret) 1925 return ret; 1926 } 1927 1928 return 0; 1929 } 1930 1931 static void init_iommu_perf_ctr(struct amd_iommu *iommu) 1932 { 1933 u64 val; 1934 struct pci_dev *pdev = iommu->dev; 1935 1936 if (!iommu_feature(iommu, FEATURE_PC)) 1937 return; 1938 1939 amd_iommu_pc_present = true; 1940 1941 pci_info(pdev, "IOMMU performance counters supported\n"); 1942 1943 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); 1944 iommu->max_banks = (u8) ((val >> 12) & 0x3f); 1945 iommu->max_counters = (u8) ((val >> 7) & 0xf); 1946 1947 return; 1948 } 1949 1950 static ssize_t amd_iommu_show_cap(struct device *dev, 1951 struct device_attribute *attr, 1952 char *buf) 1953 { 1954 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 1955 return sysfs_emit(buf, "%x\n", iommu->cap); 1956 } 1957 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); 1958 1959 static ssize_t amd_iommu_show_features(struct device *dev, 1960 struct device_attribute *attr, 1961 char *buf) 1962 { 1963 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 1964 return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features); 1965 } 1966 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); 1967 1968 static struct attribute *amd_iommu_attrs[] = { 1969 &dev_attr_cap.attr, 1970 &dev_attr_features.attr, 1971 NULL, 1972 }; 1973 1974 static struct attribute_group amd_iommu_group = { 1975 .name = "amd-iommu", 1976 .attrs = amd_iommu_attrs, 1977 }; 1978 1979 static const struct attribute_group *amd_iommu_groups[] = { 1980 &amd_iommu_group, 1981 NULL, 1982 }; 1983 1984 /* 1985 * Note: IVHD 0x11 and 0x40 also contains exact copy 1986 * of the IOMMU Extended Feature Register [MMIO Offset 0030h]. 1987 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init). 1988 */ 1989 static void __init late_iommu_features_init(struct amd_iommu *iommu) 1990 { 1991 u64 features, features2; 1992 1993 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) 1994 return; 1995 1996 /* read extended feature bits */ 1997 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); 1998 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); 1999 2000 if (!iommu->features) { 2001 iommu->features = features; 2002 iommu->features2 = features2; 2003 return; 2004 } 2005 2006 /* 2007 * Sanity check and warn if EFR values from 2008 * IVHD and MMIO conflict. 2009 */ 2010 if (features != iommu->features || 2011 features2 != iommu->features2) { 2012 pr_warn(FW_WARN 2013 "EFR mismatch. 
Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n", 2014 features, iommu->features, 2015 features2, iommu->features2); 2016 } 2017 } 2018 2019 static int __init iommu_init_pci(struct amd_iommu *iommu) 2020 { 2021 int cap_ptr = iommu->cap_ptr; 2022 int ret; 2023 2024 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2025 PCI_BUS_NUM(iommu->devid), 2026 iommu->devid & 0xff); 2027 if (!iommu->dev) 2028 return -ENODEV; 2029 2030 /* Prevent binding other PCI device drivers to IOMMU devices */ 2031 iommu->dev->match_driver = false; 2032 2033 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 2034 &iommu->cap); 2035 2036 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) 2037 amd_iommu_iotlb_sup = false; 2038 2039 late_iommu_features_init(iommu); 2040 2041 if (iommu_feature(iommu, FEATURE_GT)) { 2042 int glxval; 2043 u32 max_pasid; 2044 u64 pasmax; 2045 2046 pasmax = iommu->features & FEATURE_PASID_MASK; 2047 pasmax >>= FEATURE_PASID_SHIFT; 2048 max_pasid = (1 << (pasmax + 1)) - 1; 2049 2050 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); 2051 2052 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); 2053 2054 glxval = iommu->features & FEATURE_GLXVAL_MASK; 2055 glxval >>= FEATURE_GLXVAL_SHIFT; 2056 2057 if (amd_iommu_max_glx_val == -1) 2058 amd_iommu_max_glx_val = glxval; 2059 else 2060 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); 2061 } 2062 2063 if (iommu_feature(iommu, FEATURE_GT) && 2064 iommu_feature(iommu, FEATURE_PPR)) { 2065 iommu->is_iommu_v2 = true; 2066 amd_iommu_v2_present = true; 2067 } 2068 2069 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) 2070 return -ENOMEM; 2071 2072 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { 2073 pr_info("Using strict mode due to virtualization\n"); 2074 iommu_set_dma_strict(); 2075 amd_iommu_np_cache = true; 2076 } 2077 2078 init_iommu_perf_ctr(iommu); 2079 2080 if (amd_iommu_pgtable == AMD_IOMMU_V2) { 2081 if (!iommu_feature(iommu, FEATURE_GIOSUP) || 2082 !iommu_feature(iommu, FEATURE_GT)) { 2083 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); 2084 amd_iommu_pgtable = AMD_IOMMU_V1; 2085 } else if (iommu_default_passthrough()) { 2086 pr_warn("V2 page table doesn't support passthrough mode. 
Fallback to v1.\n"); 2087 amd_iommu_pgtable = AMD_IOMMU_V1; 2088 } 2089 } 2090 2091 if (is_rd890_iommu(iommu->dev)) { 2092 int i, j; 2093 2094 iommu->root_pdev = 2095 pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2096 iommu->dev->bus->number, 2097 PCI_DEVFN(0, 0)); 2098 2099 /* 2100 * Some rd890 systems may not be fully reconfigured by the 2101 * BIOS, so it's necessary for us to store this information so 2102 * it can be reprogrammed on resume 2103 */ 2104 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, 2105 &iommu->stored_addr_lo); 2106 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, 2107 &iommu->stored_addr_hi); 2108 2109 /* Low bit locks writes to configuration space */ 2110 iommu->stored_addr_lo &= ~1; 2111 2112 for (i = 0; i < 6; i++) 2113 for (j = 0; j < 0x12; j++) 2114 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); 2115 2116 for (i = 0; i < 0x83; i++) 2117 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 2118 } 2119 2120 amd_iommu_erratum_746_workaround(iommu); 2121 amd_iommu_ats_write_check_workaround(iommu); 2122 2123 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, 2124 amd_iommu_groups, "ivhd%d", iommu->index); 2125 if (ret) 2126 return ret; 2127 2128 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); 2129 2130 return pci_enable_device(iommu->dev); 2131 } 2132 2133 static void print_iommu_info(void) 2134 { 2135 static const char * const feat_str[] = { 2136 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", 2137 "IA", "GA", "HE", "PC" 2138 }; 2139 struct amd_iommu *iommu; 2140 2141 for_each_iommu(iommu) { 2142 struct pci_dev *pdev = iommu->dev; 2143 int i; 2144 2145 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr); 2146 2147 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { 2148 pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2); 2149 2150 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { 2151 if (iommu_feature(iommu, (1ULL << i))) 2152 pr_cont(" %s", feat_str[i]); 2153 } 2154 2155 if (iommu->features & FEATURE_GAM_VAPIC) 2156 pr_cont(" GA_vAPIC"); 2157 2158 if (iommu->features & FEATURE_SNP) 2159 pr_cont(" SNP"); 2160 2161 pr_cont("\n"); 2162 } 2163 } 2164 if (irq_remapping_enabled) { 2165 pr_info("Interrupt remapping enabled\n"); 2166 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2167 pr_info("X2APIC enabled\n"); 2168 } 2169 if (amd_iommu_pgtable == AMD_IOMMU_V2) { 2170 pr_info("V2 page table enabled (Paging mode : %d level)\n", 2171 amd_iommu_gpt_level); 2172 } 2173 } 2174 2175 static int __init amd_iommu_init_pci(void) 2176 { 2177 struct amd_iommu *iommu; 2178 struct amd_iommu_pci_seg *pci_seg; 2179 int ret; 2180 2181 for_each_iommu(iommu) { 2182 ret = iommu_init_pci(iommu); 2183 if (ret) { 2184 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", 2185 iommu->index, ret); 2186 goto out; 2187 } 2188 /* Need to setup range after PCI init */ 2189 iommu_set_cwwb_range(iommu); 2190 } 2191 2192 /* 2193 * Order is important here to make sure any unity map requirements are 2194 * fulfilled. The unity mappings are created and written to the device 2195 * table during the iommu_init_pci() call. 2196 * 2197 * After that we call init_device_table_dma() to make sure any 2198 * uninitialized DTE will block DMA, and in the end we flush the caches 2199 * of all IOMMUs to make sure the changes to the device table are 2200 * active. 
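 *
 * (iommu_init_pci() already ran for every IOMMU in the loop above, so
 * the unity mappings are in the device table by this point; the flush
 * below is what makes the IOMMUs actually see the updated entries.)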
2201 */ 2202 for_each_pci_segment(pci_seg) 2203 init_device_table_dma(pci_seg); 2204 2205 for_each_iommu(iommu) 2206 iommu_flush_all_caches(iommu); 2207 2208 print_iommu_info(); 2209 2210 out: 2211 return ret; 2212 } 2213 2214 /**************************************************************************** 2215 * 2216 * The following functions initialize the MSI interrupts for all IOMMUs 2217 * in the system. It's a bit challenging because there could be multiple 2218 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 2219 * pci_dev. 2220 * 2221 ****************************************************************************/ 2222 2223 static int iommu_setup_msi(struct amd_iommu *iommu) 2224 { 2225 int r; 2226 2227 r = pci_enable_msi(iommu->dev); 2228 if (r) 2229 return r; 2230 2231 r = request_threaded_irq(iommu->dev->irq, 2232 amd_iommu_int_handler, 2233 amd_iommu_int_thread, 2234 0, "AMD-Vi", 2235 iommu); 2236 2237 if (r) { 2238 pci_disable_msi(iommu->dev); 2239 return r; 2240 } 2241 2242 return 0; 2243 } 2244 2245 union intcapxt { 2246 u64 capxt; 2247 struct { 2248 u64 reserved_0 : 2, 2249 dest_mode_logical : 1, 2250 reserved_1 : 5, 2251 destid_0_23 : 24, 2252 vector : 8, 2253 reserved_2 : 16, 2254 destid_24_31 : 8; 2255 }; 2256 } __attribute__ ((packed)); 2257 2258 2259 static struct irq_chip intcapxt_controller; 2260 2261 static int intcapxt_irqdomain_activate(struct irq_domain *domain, 2262 struct irq_data *irqd, bool reserve) 2263 { 2264 return 0; 2265 } 2266 2267 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain, 2268 struct irq_data *irqd) 2269 { 2270 } 2271 2272 2273 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, 2274 unsigned int nr_irqs, void *arg) 2275 { 2276 struct irq_alloc_info *info = arg; 2277 int i, ret; 2278 2279 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) 2280 return -EINVAL; 2281 2282 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 2283 if (ret < 0) 2284 return ret; 2285 2286 for (i = virq; i < virq + nr_irqs; i++) { 2287 struct irq_data *irqd = irq_domain_get_irq_data(domain, i); 2288 2289 irqd->chip = &intcapxt_controller; 2290 irqd->chip_data = info->data; 2291 __irq_set_handler(i, handle_edge_irq, 0, "edge"); 2292 } 2293 2294 return ret; 2295 } 2296 2297 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq, 2298 unsigned int nr_irqs) 2299 { 2300 irq_domain_free_irqs_top(domain, virq, nr_irqs); 2301 } 2302 2303 2304 static void intcapxt_unmask_irq(struct irq_data *irqd) 2305 { 2306 struct amd_iommu *iommu = irqd->chip_data; 2307 struct irq_cfg *cfg = irqd_cfg(irqd); 2308 union intcapxt xt; 2309 2310 xt.capxt = 0ULL; 2311 xt.dest_mode_logical = apic->dest_mode_logical; 2312 xt.vector = cfg->vector; 2313 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); 2314 xt.destid_24_31 = cfg->dest_apicid >> 24; 2315 2316 /** 2317 * Current IOMMU implementation uses the same IRQ for all 2318 * 3 IOMMU interrupts. 
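 * (the event log, the PPR log and the GA log interrupt), so the same
 * vector/destination encoding is written to all three INTCAPXT
 * registers below.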
2319 */ 2320 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); 2321 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); 2322 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); 2323 } 2324 2325 static void intcapxt_mask_irq(struct irq_data *irqd) 2326 { 2327 struct amd_iommu *iommu = irqd->chip_data; 2328 2329 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); 2330 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); 2331 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); 2332 } 2333 2334 2335 static int intcapxt_set_affinity(struct irq_data *irqd, 2336 const struct cpumask *mask, bool force) 2337 { 2338 struct irq_data *parent = irqd->parent_data; 2339 int ret; 2340 2341 ret = parent->chip->irq_set_affinity(parent, mask, force); 2342 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) 2343 return ret; 2344 return 0; 2345 } 2346 2347 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on) 2348 { 2349 return on ? -EOPNOTSUPP : 0; 2350 } 2351 2352 static struct irq_chip intcapxt_controller = { 2353 .name = "IOMMU-MSI", 2354 .irq_unmask = intcapxt_unmask_irq, 2355 .irq_mask = intcapxt_mask_irq, 2356 .irq_ack = irq_chip_ack_parent, 2357 .irq_retrigger = irq_chip_retrigger_hierarchy, 2358 .irq_set_affinity = intcapxt_set_affinity, 2359 .irq_set_wake = intcapxt_set_wake, 2360 .flags = IRQCHIP_MASK_ON_SUSPEND, 2361 }; 2362 2363 static const struct irq_domain_ops intcapxt_domain_ops = { 2364 .alloc = intcapxt_irqdomain_alloc, 2365 .free = intcapxt_irqdomain_free, 2366 .activate = intcapxt_irqdomain_activate, 2367 .deactivate = intcapxt_irqdomain_deactivate, 2368 }; 2369 2370 2371 static struct irq_domain *iommu_irqdomain; 2372 2373 static struct irq_domain *iommu_get_irqdomain(void) 2374 { 2375 struct fwnode_handle *fn; 2376 2377 /* No need for locking here (yet) as the init is single-threaded */ 2378 if (iommu_irqdomain) 2379 return iommu_irqdomain; 2380 2381 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); 2382 if (!fn) 2383 return NULL; 2384 2385 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, 2386 fn, &intcapxt_domain_ops, 2387 NULL); 2388 if (!iommu_irqdomain) 2389 irq_domain_free_fwnode(fn); 2390 2391 return iommu_irqdomain; 2392 } 2393 2394 static int iommu_setup_intcapxt(struct amd_iommu *iommu) 2395 { 2396 struct irq_domain *domain; 2397 struct irq_alloc_info info; 2398 int irq, ret; 2399 int node = dev_to_node(&iommu->dev->dev); 2400 2401 domain = iommu_get_irqdomain(); 2402 if (!domain) 2403 return -ENXIO; 2404 2405 init_irq_alloc_info(&info, NULL); 2406 info.type = X86_IRQ_ALLOC_TYPE_AMDVI; 2407 info.data = iommu; 2408 2409 irq = irq_domain_alloc_irqs(domain, 1, node, &info); 2410 if (irq < 0) { 2411 irq_domain_remove(domain); 2412 return irq; 2413 } 2414 2415 ret = request_threaded_irq(irq, amd_iommu_int_handler, 2416 amd_iommu_int_thread, 0, "AMD-Vi", iommu); 2417 if (ret) { 2418 irq_domain_free_irqs(irq, 1); 2419 irq_domain_remove(domain); 2420 return ret; 2421 } 2422 2423 return 0; 2424 } 2425 2426 static int iommu_init_irq(struct amd_iommu *iommu) 2427 { 2428 int ret; 2429 2430 if (iommu->int_enabled) 2431 goto enable_faults; 2432 2433 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2434 ret = iommu_setup_intcapxt(iommu); 2435 else if (iommu->dev->msi_cap) 2436 ret = iommu_setup_msi(iommu); 2437 else 2438 ret = -ENODEV; 2439 2440 if (ret) 2441 return ret; 2442 2443 iommu->int_enabled = true; 2444 enable_faults: 2445 2446 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2447 iommu_feature_enable(iommu, 
CONTROL_INTCAPXT_EN);
2448
2449 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2450
2451 if (iommu->ppr_log != NULL)
2452 iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
2453 return 0;
2454 }
2455
2456 /****************************************************************************
2457 *
2458 * The next functions belong to the third pass of parsing the ACPI
2459 * table. In this last pass the memory mapping requirements are
2460 * gathered (like exclusion and unity mapping ranges).
2461 *
2462 ****************************************************************************/
2463
2464 static void __init free_unity_maps(void)
2465 {
2466 struct unity_map_entry *entry, *next;
2467 struct amd_iommu_pci_seg *p, *pci_seg;
2468
2469 for_each_pci_segment_safe(pci_seg, p) {
2470 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2471 list_del(&entry->list);
2472 kfree(entry);
2473 }
2474 }
2475 }
2476
2477 /* called for unity map ACPI definition */
2478 static int __init init_unity_map_range(struct ivmd_header *m,
2479 struct acpi_table_header *ivrs_base)
2480 {
2481 struct unity_map_entry *e = NULL;
2482 struct amd_iommu_pci_seg *pci_seg;
2483 char *s;
2484
2485 pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2486 if (pci_seg == NULL)
2487 return -ENOMEM;
2488
2489 e = kzalloc(sizeof(*e), GFP_KERNEL);
2490 if (e == NULL)
2491 return -ENOMEM;
2492
2493 switch (m->type) {
2494 default:
2495 kfree(e);
2496 return 0;
2497 case ACPI_IVMD_TYPE:
2498 s = "IVMD_TYPE\t\t\t";
2499 e->devid_start = e->devid_end = m->devid;
2500 break;
2501 case ACPI_IVMD_TYPE_ALL:
2502 s = "IVMD_TYPE_ALL\t\t";
2503 e->devid_start = 0;
2504 e->devid_end = pci_seg->last_bdf;
2505 break;
2506 case ACPI_IVMD_TYPE_RANGE:
2507 s = "IVMD_TYPE_RANGE\t\t";
2508 e->devid_start = m->devid;
2509 e->devid_end = m->aux;
2510 break;
2511 }
2512 e->address_start = PAGE_ALIGN(m->range_start);
2513 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2514 e->prot = m->flags >> 1;
2515
2516 /*
2517 * Treat per-device exclusion ranges as r/w unity-mapped regions
2518 * since some buggy BIOSes may overwrite the exclusion range data
2519 * (the exclusion_start and exclusion_length members). This
2520 * happens when multiple exclusion ranges (IVMD entries) are
2521 * defined in the ACPI table.
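 * Treating the range as a r/w unity-mapped region keeps such devices
 * working even when the reported exclusion data cannot be trusted.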
2522 */ 2523 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2524 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; 2525 2526 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: " 2527 "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx" 2528 " flags: %x\n", s, m->pci_seg, 2529 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 2530 PCI_FUNC(e->devid_start), m->pci_seg, 2531 PCI_BUS_NUM(e->devid_end), 2532 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 2533 e->address_start, e->address_end, m->flags); 2534 2535 list_add_tail(&e->list, &pci_seg->unity_map); 2536 2537 return 0; 2538 } 2539 2540 /* iterates over all memory definitions we find in the ACPI table */ 2541 static int __init init_memory_definitions(struct acpi_table_header *table) 2542 { 2543 u8 *p = (u8 *)table, *end = (u8 *)table; 2544 struct ivmd_header *m; 2545 2546 end += table->length; 2547 p += IVRS_HEADER_LENGTH; 2548 2549 while (p < end) { 2550 m = (struct ivmd_header *)p; 2551 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) 2552 init_unity_map_range(m, table); 2553 2554 p += m->length; 2555 } 2556 2557 return 0; 2558 } 2559 2560 /* 2561 * Init the device table to not allow DMA access for devices 2562 */ 2563 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2564 { 2565 u32 devid; 2566 struct dev_table_entry *dev_table = pci_seg->dev_table; 2567 2568 if (dev_table == NULL) 2569 return; 2570 2571 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2572 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID); 2573 if (!amd_iommu_snp_en) 2574 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION); 2575 } 2576 } 2577 2578 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2579 { 2580 u32 devid; 2581 struct dev_table_entry *dev_table = pci_seg->dev_table; 2582 2583 if (dev_table == NULL) 2584 return; 2585 2586 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2587 dev_table[devid].data[0] = 0ULL; 2588 dev_table[devid].data[1] = 0ULL; 2589 } 2590 } 2591 2592 static void init_device_table(void) 2593 { 2594 struct amd_iommu_pci_seg *pci_seg; 2595 u32 devid; 2596 2597 if (!amd_iommu_irq_remap) 2598 return; 2599 2600 for_each_pci_segment(pci_seg) { 2601 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) 2602 __set_dev_entry_bit(pci_seg->dev_table, 2603 devid, DEV_ENTRY_IRQ_TBL_EN); 2604 } 2605 } 2606 2607 static void iommu_init_flags(struct amd_iommu *iommu) 2608 { 2609 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 2610 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 2611 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 2612 2613 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 2614 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 2615 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 2616 2617 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 2618 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 2619 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 2620 2621 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
2622 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2623 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2624
2625 /*
2626 * make IOMMU memory accesses cache coherent
2627 */
2628 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2629
2630 /* Set IOTLB invalidation timeout to 1s */
2631 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2632 }
2633
2634 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2635 {
2636 int i, j;
2637 u32 ioc_feature_control;
2638 struct pci_dev *pdev = iommu->root_pdev;
2639
2640 /* RD890 BIOSes may not have completely reconfigured the iommu */
2641 if (!is_rd890_iommu(iommu->dev) || !pdev)
2642 return;
2643
2644 /*
2645 * First, we need to ensure that the iommu is enabled. This is
2646 * controlled by a register in the northbridge
2647 */
2648
2649 /* Select Northbridge indirect register 0x75 and enable writing */
2650 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2651 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2652
2653 /* Enable the iommu */
2654 if (!(ioc_feature_control & 0x1))
2655 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2656
2657 /* Restore the iommu BAR */
2658 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2659 iommu->stored_addr_lo);
2660 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2661 iommu->stored_addr_hi);
2662
2663 /* Restore the l1 indirect regs for each of the 6 l1s */
2664 for (i = 0; i < 6; i++)
2665 for (j = 0; j < 0x12; j++)
2666 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2667
2668 /* Restore the l2 indirect regs */
2669 for (i = 0; i < 0x83; i++)
2670 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2671
2672 /* Lock PCI setup registers */
2673 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2674 iommu->stored_addr_lo | 1);
2675 }
2676
2677 static void iommu_enable_ga(struct amd_iommu *iommu)
2678 {
2679 #ifdef CONFIG_IRQ_REMAP
2680 switch (amd_iommu_guest_ir) {
2681 case AMD_IOMMU_GUEST_IR_VAPIC:
2682 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2683 iommu_feature_enable(iommu, CONTROL_GA_EN);
2684 iommu->irte_ops = &irte_128_ops;
2685 break;
2686 default:
2687 iommu->irte_ops = &irte_32_ops;
2688 break;
2689 }
2690 #endif
2691 }
2692
2693 static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2694 {
2695 iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2696 }
2697
2698 static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2699 {
2700 u64 ctrl;
2701
2702 if (!amd_iommu_irtcachedis)
2703 return;
2704
2705 /*
2706 * Note:
2707 * Support for the IRTCacheDis feature is determined by
2708 * checking if the bit is writable.
2709 */
2710 iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2711 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
2712 ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
2713 if (ctrl)
2714 iommu->irtcachedis_enabled = true;
2715 pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2716 iommu->index, iommu->devid,
2717 iommu->irtcachedis_enabled ? "disabled" : "enabled");
2718 }
2719
2720 static void early_enable_iommu(struct amd_iommu *iommu)
2721 {
2722 iommu_disable(iommu);
2723 iommu_init_flags(iommu);
2724 iommu_set_device_table(iommu);
2725 iommu_enable_command_buffer(iommu);
2726 iommu_enable_event_buffer(iommu);
2727 iommu_set_exclusion_range(iommu);
2728 iommu_enable_ga(iommu);
2729 iommu_enable_xt(iommu);
2730 iommu_enable_irtcachedis(iommu);
2731 iommu_enable(iommu);
2732 iommu_flush_all_caches(iommu);
2733 }
2734
2735 /*
2736 * This function finally enables all IOMMUs found in the system after
2737 * they have been initialized.
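 * "Enables" here means running the full early_enable_iommu() sequence
 * above: program the control flags and device table base, set up the
 * command and event buffers, then turn translation on and flush the
 * caches.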
2738 *
2739 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, try to
2740 * copy the old content of the device table entries. If that is not the
2741 * case, or the copy fails, just continue as a normal kernel would.
2742 */
2743 static void early_enable_iommus(void)
2744 {
2745 struct amd_iommu *iommu;
2746 struct amd_iommu_pci_seg *pci_seg;
2747
2748 if (!copy_device_table()) {
2749 /*
2750 * If we get here, copying the device table from the old kernel
2751 * (with all IOMMUs enabled) failed; print an error message and
2752 * free the allocated old_dev_tbl_cpy.
2753 */
2754 if (amd_iommu_pre_enabled)
2755 pr_err("Failed to copy DEV table from previous kernel.\n");
2756
2757 for_each_pci_segment(pci_seg) {
2758 if (pci_seg->old_dev_tbl_cpy != NULL) {
2759 free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
2760 get_order(pci_seg->dev_table_size));
2761 pci_seg->old_dev_tbl_cpy = NULL;
2762 }
2763 }
2764
2765 for_each_iommu(iommu) {
2766 clear_translation_pre_enabled(iommu);
2767 early_enable_iommu(iommu);
2768 }
2769 } else {
2770 pr_info("Copied DEV table from previous kernel.\n");
2771
2772 for_each_pci_segment(pci_seg) {
2773 free_pages((unsigned long)pci_seg->dev_table,
2774 get_order(pci_seg->dev_table_size));
2775 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2776 }
2777
2778 for_each_iommu(iommu) {
2779 iommu_disable_command_buffer(iommu);
2780 iommu_disable_event_buffer(iommu);
2781 iommu_disable_irtcachedis(iommu);
2782 iommu_enable_command_buffer(iommu);
2783 iommu_enable_event_buffer(iommu);
2784 iommu_enable_ga(iommu);
2785 iommu_enable_xt(iommu);
2786 iommu_enable_irtcachedis(iommu);
2787 iommu_set_device_table(iommu);
2788 iommu_flush_all_caches(iommu);
2789 }
2790 }
2791 }
2792
2793 static void enable_iommus_v2(void)
2794 {
2795 struct amd_iommu *iommu;
2796
2797 for_each_iommu(iommu) {
2798 iommu_enable_ppr_log(iommu);
2799 iommu_enable_gt(iommu);
2800 }
2801 }
2802
2803 static void enable_iommus_vapic(void)
2804 {
2805 #ifdef CONFIG_IRQ_REMAP
2806 u32 status, i;
2807 struct amd_iommu *iommu;
2808
2809 for_each_iommu(iommu) {
2810 /*
2811 * Disable GALog if already running. It could have been enabled
2812 * in the previous boot before kdump.
2813 */
2814 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2815 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2816 continue;
2817
2818 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2819 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2820
2821 /*
2822 * Need to poll the GALogRun bit and wait for it to read zero
2823 * before we can safely set or modify the GA Log registers.
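 * (While GALogRun is still set the hardware may be actively using the
 * GA Log registers, so reprogramming them at that point would race
 * with the IOMMU.)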
2824 */
2825 for (i = 0; i < LOOP_TIMEOUT; ++i) {
2826 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2827 if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2828 break;
2829 udelay(10);
2830 }
2831
2832 if (WARN_ON(i >= LOOP_TIMEOUT))
2833 return;
2834 }
2835
2836 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2837 !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
2838 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2839 return;
2840 }
2841
2842 if (amd_iommu_snp_en &&
2843 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
2844 pr_warn("Disabling Virtual APIC due to SNP\n");
2845 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2846 return;
2847 }
2848
2849 /* Enable GAM and SNPAVIC support */
2850 for_each_iommu(iommu) {
2851 if (iommu_init_ga_log(iommu) ||
2852 iommu_ga_log_enable(iommu))
2853 return;
2854
2855 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2856 if (amd_iommu_snp_en)
2857 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
2858 }
2859
2860 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2861 pr_info("Virtual APIC enabled\n");
2862 #endif
2863 }
2864
2865 static void enable_iommus(void)
2866 {
2867 early_enable_iommus();
2868 enable_iommus_vapic();
2869 enable_iommus_v2();
2870 }
2871
2872 static void disable_iommus(void)
2873 {
2874 struct amd_iommu *iommu;
2875
2876 for_each_iommu(iommu)
2877 iommu_disable(iommu);
2878
2879 #ifdef CONFIG_IRQ_REMAP
2880 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2881 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2882 #endif
2883 }
2884
2885 /*
2886 * Suspend/Resume support:
2887 * the IOMMUs are disabled on suspend and fully re-enabled on resume
2888 */
2889
2890 static void amd_iommu_resume(void)
2891 {
2892 struct amd_iommu *iommu;
2893
2894 for_each_iommu(iommu)
2895 iommu_apply_resume_quirks(iommu);
2896
2897 /* re-load the hardware */
2898 enable_iommus();
2899
2900 amd_iommu_enable_interrupts();
2901 }
2902
2903 static int amd_iommu_suspend(void)
2904 {
2905 /* disable IOMMUs to go out of the way for BIOS */
2906 disable_iommus();
2907
2908 return 0;
2909 }
2910
2911 static struct syscore_ops amd_iommu_syscore_ops = {
2912 .suspend = amd_iommu_suspend,
2913 .resume = amd_iommu_resume,
2914 };
2915
2916 static void __init free_iommu_resources(void)
2917 {
2918 kmem_cache_destroy(amd_iommu_irq_cache);
2919 amd_iommu_irq_cache = NULL;
2920
2921 free_iommu_all();
2922 free_pci_segments();
2923 }
2924
2925 /* SB IOAPIC is always on this device in AMD systems */
2926 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2927
2928 static bool __init check_ioapic_information(void)
2929 {
2930 const char *fw_bug = FW_BUG;
2931 bool ret, has_sb_ioapic;
2932 int idx;
2933
2934 has_sb_ioapic = false;
2935 ret = false;
2936
2937 /*
2938 * If we have map overrides on the kernel command line the
2939 * messages in this function might not describe firmware bugs
2940 * anymore - so be careful
2941 */
2942 if (cmdline_maps)
2943 fw_bug = "";
2944
2945 for (idx = 0; idx < nr_ioapics; idx++) {
2946 int devid, id = mpc_ioapic_id(idx);
2947
2948 devid = get_ioapic_devid(id);
2949 if (devid < 0) {
2950 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2951 fw_bug, id);
2952 ret = false;
2953 } else if (devid == IOAPIC_SB_DEVID) {
2954 has_sb_ioapic = true;
2955 ret = true;
2956 }
2957 }
2958
2959 if (!has_sb_ioapic) {
2960 /*
2961 * We expect the SB IOAPIC to be listed in the IVRS
2962 * table. The system timer is connected to the SB IOAPIC
2963 * and if we don't have it in the list the system will
2964 * panic at boot time.
This situation usually happens 2965 * when the BIOS is buggy and provides us the wrong 2966 * device id for the IOAPIC in the system. 2967 */ 2968 pr_err("%s: No southbridge IOAPIC found\n", fw_bug); 2969 } 2970 2971 if (!ret) 2972 pr_err("Disabling interrupt remapping\n"); 2973 2974 return ret; 2975 } 2976 2977 static void __init free_dma_resources(void) 2978 { 2979 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 2980 get_order(MAX_DOMAIN_ID/8)); 2981 amd_iommu_pd_alloc_bitmap = NULL; 2982 2983 free_unity_maps(); 2984 } 2985 2986 static void __init ivinfo_init(void *ivrs) 2987 { 2988 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET)); 2989 } 2990 2991 /* 2992 * This is the hardware init function for AMD IOMMU in the system. 2993 * This function is called either from amd_iommu_init or from the interrupt 2994 * remapping setup code. 2995 * 2996 * This function basically parses the ACPI table for AMD IOMMU (IVRS) 2997 * four times: 2998 * 2999 * 1 pass) Discover the most comprehensive IVHD type to use. 3000 * 3001 * 2 pass) Find the highest PCI device id the driver has to handle. 3002 * Upon this information the size of the data structures is 3003 * determined that needs to be allocated. 3004 * 3005 * 3 pass) Initialize the data structures just allocated with the 3006 * information in the ACPI table about available AMD IOMMUs 3007 * in the system. It also maps the PCI devices in the 3008 * system to specific IOMMUs 3009 * 3010 * 4 pass) After the basic data structures are allocated and 3011 * initialized we update them with information about memory 3012 * remapping requirements parsed out of the ACPI table in 3013 * this last pass. 3014 * 3015 * After everything is set up the IOMMUs are enabled and the necessary 3016 * hotplug and suspend notifiers are registered. 
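 *
 * (In this driver, pass 1 corresponds to get_highest_supported_ivhd_type(),
 * passes 2 and 3 happen under init_iommu_all(), and pass 4 under
 * init_memory_definitions(); see early_amd_iommu_init() below.)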
3017 */
3018 static int __init early_amd_iommu_init(void)
3019 {
3020 struct acpi_table_header *ivrs_base;
3021 int remap_cache_sz, ret;
3022 acpi_status status;
3023
3024 if (!amd_iommu_detected)
3025 return -ENODEV;
3026
3027 status = acpi_get_table("IVRS", 0, &ivrs_base);
3028 if (status == AE_NOT_FOUND)
3029 return -ENODEV;
3030 else if (ACPI_FAILURE(status)) {
3031 const char *err = acpi_format_exception(status);
3032 pr_err("IVRS table error: %s\n", err);
3033 return -EINVAL;
3034 }
3035
3036 /*
3037 * Validate the checksum here so we don't need to do it when
3038 * we actually parse the table
3039 */
3040 ret = check_ivrs_checksum(ivrs_base);
3041 if (ret)
3042 goto out;
3043
3044 ivinfo_init(ivrs_base);
3045
3046 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3047 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
3048
3049 /* Protection domain bitmap - directly used by all IOMMUs */
3050 ret = -ENOMEM;
3051
3052 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
3053 GFP_KERNEL | __GFP_ZERO,
3054 get_order(MAX_DOMAIN_ID/8));
3055 if (amd_iommu_pd_alloc_bitmap == NULL)
3056 goto out;
3057
3058 /*
3059 * Never allocate domain 0 because it's used as the non-allocated and
3060 * error value placeholder
3061 */
3062 __set_bit(0, amd_iommu_pd_alloc_bitmap);
3063
3064 /*
3065 * Now that the data structures are allocated and basically initialized,
3066 * start the real ACPI table scan.
3067 */
3068 ret = init_iommu_all(ivrs_base);
3069 if (ret)
3070 goto out;
3071
3072 /* 5 level guest page table */
3073 if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3074 check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL)
3075 amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
3076
3077 /* Disable any previously enabled IOMMUs */
3078 if (!is_kdump_kernel() || amd_iommu_disabled)
3079 disable_iommus();
3080
3081 if (amd_iommu_irq_remap)
3082 amd_iommu_irq_remap = check_ioapic_information();
3083
3084 if (amd_iommu_irq_remap) {
3085 struct amd_iommu_pci_seg *pci_seg;
3086 /*
3087 * Interrupt remapping is enabled, so create the kmem_cache for
3088 * the remapping tables.
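 * (Legacy IRTEs are 32 bits while guest-vAPIC (GA) IRTEs are 128 bits,
 * which is why two different cache entry sizes are computed below.)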
3089 */ 3090 ret = -ENOMEM; 3091 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3092 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); 3093 else 3094 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); 3095 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", 3096 remap_cache_sz, 3097 DTE_INTTAB_ALIGNMENT, 3098 0, NULL); 3099 if (!amd_iommu_irq_cache) 3100 goto out; 3101 3102 for_each_pci_segment(pci_seg) { 3103 if (alloc_irq_lookup_table(pci_seg)) 3104 goto out; 3105 } 3106 } 3107 3108 ret = init_memory_definitions(ivrs_base); 3109 if (ret) 3110 goto out; 3111 3112 /* init the device table */ 3113 init_device_table(); 3114 3115 out: 3116 /* Don't leak any ACPI memory */ 3117 acpi_put_table(ivrs_base); 3118 3119 return ret; 3120 } 3121 3122 static int amd_iommu_enable_interrupts(void) 3123 { 3124 struct amd_iommu *iommu; 3125 int ret = 0; 3126 3127 for_each_iommu(iommu) { 3128 ret = iommu_init_irq(iommu); 3129 if (ret) 3130 goto out; 3131 } 3132 3133 out: 3134 return ret; 3135 } 3136 3137 static bool __init detect_ivrs(void) 3138 { 3139 struct acpi_table_header *ivrs_base; 3140 acpi_status status; 3141 int i; 3142 3143 status = acpi_get_table("IVRS", 0, &ivrs_base); 3144 if (status == AE_NOT_FOUND) 3145 return false; 3146 else if (ACPI_FAILURE(status)) { 3147 const char *err = acpi_format_exception(status); 3148 pr_err("IVRS table error: %s\n", err); 3149 return false; 3150 } 3151 3152 acpi_put_table(ivrs_base); 3153 3154 if (amd_iommu_force_enable) 3155 goto out; 3156 3157 /* Don't use IOMMU if there is Stoney Ridge graphics */ 3158 for (i = 0; i < 32; i++) { 3159 u32 pci_id; 3160 3161 pci_id = read_pci_config(0, i, 0, 0); 3162 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { 3163 pr_info("Disable IOMMU on Stoney Ridge\n"); 3164 return false; 3165 } 3166 } 3167 3168 out: 3169 /* Make sure ACS will be enabled during PCI probe */ 3170 pci_request_acs(); 3171 3172 return true; 3173 } 3174 3175 /**************************************************************************** 3176 * 3177 * AMD IOMMU Initialization State Machine 3178 * 3179 ****************************************************************************/ 3180 3181 static int __init state_next(void) 3182 { 3183 int ret = 0; 3184 3185 switch (init_state) { 3186 case IOMMU_START_STATE: 3187 if (!detect_ivrs()) { 3188 init_state = IOMMU_NOT_FOUND; 3189 ret = -ENODEV; 3190 } else { 3191 init_state = IOMMU_IVRS_DETECTED; 3192 } 3193 break; 3194 case IOMMU_IVRS_DETECTED: 3195 if (amd_iommu_disabled) { 3196 init_state = IOMMU_CMDLINE_DISABLED; 3197 ret = -EINVAL; 3198 } else { 3199 ret = early_amd_iommu_init(); 3200 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 3201 } 3202 break; 3203 case IOMMU_ACPI_FINISHED: 3204 early_enable_iommus(); 3205 x86_platform.iommu_shutdown = disable_iommus; 3206 init_state = IOMMU_ENABLED; 3207 break; 3208 case IOMMU_ENABLED: 3209 register_syscore_ops(&amd_iommu_syscore_ops); 3210 ret = amd_iommu_init_pci(); 3211 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 3212 enable_iommus_vapic(); 3213 enable_iommus_v2(); 3214 break; 3215 case IOMMU_PCI_INIT: 3216 ret = amd_iommu_enable_interrupts(); 3217 init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 3218 break; 3219 case IOMMU_INTERRUPTS_EN: 3220 init_state = IOMMU_INITIALIZED; 3221 break; 3222 case IOMMU_INITIALIZED: 3223 /* Nothing to do */ 3224 break; 3225 case IOMMU_NOT_FOUND: 3226 case IOMMU_INIT_ERROR: 3227 case IOMMU_CMDLINE_DISABLED: 3228 /* Error states => do nothing */ 3229 ret = -EINVAL; 3230 break; 3231 default: 3232 /* Unknown state */ 3233 BUG(); 3234 } 3235 3236 if (ret) { 3237 free_dma_resources(); 3238 if (!irq_remapping_enabled) { 3239 disable_iommus(); 3240 free_iommu_resources(); 3241 } else { 3242 struct amd_iommu *iommu; 3243 struct amd_iommu_pci_seg *pci_seg; 3244 3245 for_each_pci_segment(pci_seg) 3246 uninit_device_table_dma(pci_seg); 3247 3248 for_each_iommu(iommu) 3249 iommu_flush_all_caches(iommu); 3250 } 3251 } 3252 return ret; 3253 } 3254 3255 static int __init iommu_go_to_state(enum iommu_init_state state) 3256 { 3257 int ret = -EINVAL; 3258 3259 while (init_state != state) { 3260 if (init_state == IOMMU_NOT_FOUND || 3261 init_state == IOMMU_INIT_ERROR || 3262 init_state == IOMMU_CMDLINE_DISABLED) 3263 break; 3264 ret = state_next(); 3265 } 3266 3267 return ret; 3268 } 3269 3270 #ifdef CONFIG_IRQ_REMAP 3271 int __init amd_iommu_prepare(void) 3272 { 3273 int ret; 3274 3275 amd_iommu_irq_remap = true; 3276 3277 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 3278 if (ret) { 3279 amd_iommu_irq_remap = false; 3280 return ret; 3281 } 3282 3283 return amd_iommu_irq_remap ? 0 : -ENODEV; 3284 } 3285 3286 int __init amd_iommu_enable(void) 3287 { 3288 int ret; 3289 3290 ret = iommu_go_to_state(IOMMU_ENABLED); 3291 if (ret) 3292 return ret; 3293 3294 irq_remapping_enabled = 1; 3295 return amd_iommu_xt_mode; 3296 } 3297 3298 void amd_iommu_disable(void) 3299 { 3300 amd_iommu_suspend(); 3301 } 3302 3303 int amd_iommu_reenable(int mode) 3304 { 3305 amd_iommu_resume(); 3306 3307 return 0; 3308 } 3309 3310 int __init amd_iommu_enable_faulting(void) 3311 { 3312 /* We enable MSI later when PCI is initialized */ 3313 return 0; 3314 } 3315 #endif 3316 3317 /* 3318 * This is the core init function for AMD IOMMU hardware in the system. 3319 * This function is called from the generic x86 DMA layer initialization 3320 * code. 3321 */ 3322 static int __init amd_iommu_init(void) 3323 { 3324 struct amd_iommu *iommu; 3325 int ret; 3326 3327 ret = iommu_go_to_state(IOMMU_INITIALIZED); 3328 #ifdef CONFIG_GART_IOMMU 3329 if (ret && list_empty(&amd_iommu_list)) { 3330 /* 3331 * We failed to initialize the AMD IOMMU - try fallback 3332 * to GART if possible. 3333 */ 3334 gart_iommu_init(); 3335 } 3336 #endif 3337 3338 for_each_iommu(iommu) 3339 amd_iommu_debugfs_setup(iommu); 3340 3341 return ret; 3342 } 3343 3344 static bool amd_iommu_sme_check(void) 3345 { 3346 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || 3347 (boot_cpu_data.x86 != 0x17)) 3348 return true; 3349 3350 /* For Fam17h, a specific level of support is required */ 3351 if (boot_cpu_data.microcode >= 0x08001205) 3352 return true; 3353 3354 if ((boot_cpu_data.microcode >= 0x08001126) && 3355 (boot_cpu_data.microcode <= 0x080011ff)) 3356 return true; 3357 3358 pr_notice("IOMMU not currently supported when SME is active\n"); 3359 3360 return false; 3361 } 3362 3363 /**************************************************************************** 3364 * 3365 * Early detect code. This code runs at IOMMU detection time in the DMA 3366 * layer. 
It just looks if there is an IVRS ACPI table to detect AMD 3367 * IOMMUs 3368 * 3369 ****************************************************************************/ 3370 int __init amd_iommu_detect(void) 3371 { 3372 int ret; 3373 3374 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 3375 return -ENODEV; 3376 3377 if (!amd_iommu_sme_check()) 3378 return -ENODEV; 3379 3380 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 3381 if (ret) 3382 return ret; 3383 3384 amd_iommu_detected = true; 3385 iommu_detected = 1; 3386 x86_init.iommu.iommu_init = amd_iommu_init; 3387 3388 return 1; 3389 } 3390 3391 /**************************************************************************** 3392 * 3393 * Parsing functions for the AMD IOMMU specific kernel command line 3394 * options. 3395 * 3396 ****************************************************************************/ 3397 3398 static int __init parse_amd_iommu_dump(char *str) 3399 { 3400 amd_iommu_dump = true; 3401 3402 return 1; 3403 } 3404 3405 static int __init parse_amd_iommu_intr(char *str) 3406 { 3407 for (; *str; ++str) { 3408 if (strncmp(str, "legacy", 6) == 0) { 3409 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 3410 break; 3411 } 3412 if (strncmp(str, "vapic", 5) == 0) { 3413 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 3414 break; 3415 } 3416 } 3417 return 1; 3418 } 3419 3420 static int __init parse_amd_iommu_options(char *str) 3421 { 3422 if (!str) 3423 return -EINVAL; 3424 3425 while (*str) { 3426 if (strncmp(str, "fullflush", 9) == 0) { 3427 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); 3428 iommu_set_dma_strict(); 3429 } else if (strncmp(str, "force_enable", 12) == 0) { 3430 amd_iommu_force_enable = true; 3431 } else if (strncmp(str, "off", 3) == 0) { 3432 amd_iommu_disabled = true; 3433 } else if (strncmp(str, "force_isolation", 15) == 0) { 3434 amd_iommu_force_isolation = true; 3435 } else if (strncmp(str, "pgtbl_v1", 8) == 0) { 3436 amd_iommu_pgtable = AMD_IOMMU_V1; 3437 } else if (strncmp(str, "pgtbl_v2", 8) == 0) { 3438 amd_iommu_pgtable = AMD_IOMMU_V2; 3439 } else if (strncmp(str, "irtcachedis", 11) == 0) { 3440 amd_iommu_irtcachedis = true; 3441 } else { 3442 pr_notice("Unknown option - '%s'\n", str); 3443 } 3444 3445 str += strcspn(str, ","); 3446 while (*str == ',') 3447 str++; 3448 } 3449 3450 return 1; 3451 } 3452 3453 static int __init parse_ivrs_ioapic(char *str) 3454 { 3455 u32 seg = 0, bus, dev, fn; 3456 int id, i; 3457 u32 devid; 3458 3459 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3460 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) 3461 goto found; 3462 3463 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3464 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { 3465 pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", 3466 str, id, seg, bus, dev, fn); 3467 goto found; 3468 } 3469 3470 pr_err("Invalid command line: ivrs_ioapic%s\n", str); 3471 return 1; 3472 3473 found: 3474 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 3475 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 3476 str); 3477 return 1; 3478 } 3479 3480 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3481 3482 cmdline_maps = true; 3483 i = early_ioapic_map_size++; 3484 early_ioapic_map[i].id = id; 3485 early_ioapic_map[i].devid = devid; 3486 early_ioapic_map[i].cmd_line = true; 3487 3488 return 1; 3489 } 3490 3491 static int __init parse_ivrs_hpet(char *str) 3492 { 3493 u32 seg = 0, bus, dev, fn; 3494 
int id, i; 3495 u32 devid; 3496 3497 if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3498 sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) 3499 goto found; 3500 3501 if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || 3502 sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { 3503 pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", 3504 str, id, seg, bus, dev, fn); 3505 goto found; 3506 } 3507 3508 pr_err("Invalid command line: ivrs_hpet%s\n", str); 3509 return 1; 3510 3511 found: 3512 if (early_hpet_map_size == EARLY_MAP_SIZE) { 3513 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", 3514 str); 3515 return 1; 3516 } 3517 3518 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3519 3520 cmdline_maps = true; 3521 i = early_hpet_map_size++; 3522 early_hpet_map[i].id = id; 3523 early_hpet_map[i].devid = devid; 3524 early_hpet_map[i].cmd_line = true; 3525 3526 return 1; 3527 } 3528 3529 #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN) 3530 3531 static int __init parse_ivrs_acpihid(char *str) 3532 { 3533 u32 seg = 0, bus, dev, fn; 3534 char *hid, *uid, *p, *addr; 3535 char acpiid[ACPIID_LEN] = {0}; 3536 int i; 3537 3538 addr = strchr(str, '@'); 3539 if (!addr) { 3540 addr = strchr(str, '='); 3541 if (!addr) 3542 goto not_found; 3543 3544 ++addr; 3545 3546 if (strlen(addr) > ACPIID_LEN) 3547 goto not_found; 3548 3549 if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || 3550 sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { 3551 pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", 3552 str, acpiid, seg, bus, dev, fn); 3553 goto found; 3554 } 3555 goto not_found; 3556 } 3557 3558 /* We have the '@', make it the terminator to get just the acpiid */ 3559 *addr++ = 0; 3560 3561 if (strlen(str) > ACPIID_LEN + 1) 3562 goto not_found; 3563 3564 if (sscanf(str, "=%s", acpiid) != 1) 3565 goto not_found; 3566 3567 if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || 3568 sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) 3569 goto found; 3570 3571 not_found: 3572 pr_err("Invalid command line: ivrs_acpihid%s\n", str); 3573 return 1; 3574 3575 found: 3576 p = acpiid; 3577 hid = strsep(&p, ":"); 3578 uid = p; 3579 3580 if (!hid || !(*hid) || !uid) { 3581 pr_err("Invalid command line: hid or uid\n"); 3582 return 1; 3583 } 3584 3585 /* 3586 * Ignore leading zeroes after ':', so e.g., AMDI0095:00 3587 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match 3588 */ 3589 while (*uid == '0' && *(uid + 1)) 3590 uid++; 3591 3592 i = early_acpihid_map_size++; 3593 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 3594 memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); 3595 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3596 early_acpihid_map[i].cmd_line = true; 3597 3598 return 1; 3599 } 3600 3601 __setup("amd_iommu_dump", parse_amd_iommu_dump); 3602 __setup("amd_iommu=", parse_amd_iommu_options); 3603 __setup("amd_iommu_intr=", parse_amd_iommu_intr); 3604 __setup("ivrs_ioapic", parse_ivrs_ioapic); 3605 __setup("ivrs_hpet", parse_ivrs_hpet); 3606 __setup("ivrs_acpihid", parse_ivrs_acpihid); 3607 3608 bool amd_iommu_v2_supported(void) 3609 { 3610 /* CPU page table size should match IOMMU guest page table size */ 3611 if (cpu_feature_enabled(X86_FEATURE_LA57) && 3612 amd_iommu_gpt_level != PAGE_MODE_5_LEVEL) 3613 return false; 3614 3615 /* 3616 * Since DTE[Mode]=0 is prohibited on SNP-enabled 
systems
3617 * (i.e. EFR[SNPSup]=1), an IOMMUv2 page table cannot be used without
3618 * setting up an IOMMUv1 page table.
3619 */
3620 return amd_iommu_v2_present && !amd_iommu_snp_en;
3621 }
3622 EXPORT_SYMBOL(amd_iommu_v2_supported);
3623
3624 struct amd_iommu *get_amd_iommu(unsigned int idx)
3625 {
3626 unsigned int i = 0;
3627 struct amd_iommu *iommu;
3628
3629 for_each_iommu(iommu)
3630 if (i++ == idx)
3631 return iommu;
3632 return NULL;
3633 }
3634
3635 /****************************************************************************
3636 *
3637 * IOMMU EFR Performance Counter support. This code allows
3638 * access to the IOMMU PC functionality.
3639 *
3640 ****************************************************************************/
3641
3642 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3643 {
3644 struct amd_iommu *iommu = get_amd_iommu(idx);
3645
3646 if (iommu)
3647 return iommu->max_banks;
3648
3649 return 0;
3650 }
3651 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3652
3653 bool amd_iommu_pc_supported(void)
3654 {
3655 return amd_iommu_pc_present;
3656 }
3657 EXPORT_SYMBOL(amd_iommu_pc_supported);
3658
3659 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3660 {
3661 struct amd_iommu *iommu = get_amd_iommu(idx);
3662
3663 if (iommu)
3664 return iommu->max_counters;
3665
3666 return 0;
3667 }
3668 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3669
3670 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3671 u8 fxn, u64 *value, bool is_write)
3672 {
3673 u32 offset;
3674 u32 max_offset_lim;
3675
3676 /* Make sure the IOMMU PC resource is available */
3677 if (!amd_iommu_pc_present)
3678 return -ENODEV;
3679
3680 /* Check for valid iommu and pc register indexing */
3681 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3682 return -ENODEV;
3683
3684 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3685
3686 /* Limit the offset to the hw defined mmio region aperture */
3687 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3688 (iommu->max_counters << 8) | 0x28);
3689 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3690 (offset > max_offset_lim))
3691 return -EINVAL;
3692
3693 if (is_write) {
3694 u64 val = *value & GENMASK_ULL(47, 0);
3695
3696 writel((u32)val, iommu->mmio_base + offset);
3697 writel((val >> 32), iommu->mmio_base + offset + 4);
3698 } else {
3699 *value = readl(iommu->mmio_base + offset + 4);
3700 *value <<= 32;
3701 *value |= readl(iommu->mmio_base + offset);
3702 *value &= GENMASK_ULL(47, 0);
3703 }
3704
3705 return 0;
3706 }
3707
3708 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3709 {
3710 if (!iommu)
3711 return -EINVAL;
3712
3713 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3714 }
3715
3716 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3717 {
3718 if (!iommu)
3719 return -EINVAL;
3720
3721 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3722 }
3723
3724 #ifdef CONFIG_AMD_MEM_ENCRYPT
3725 int amd_iommu_snp_enable(void)
3726 {
3727 /*
3728 * SNP support requires that the IOMMU be enabled and not
3729 * configured in passthrough mode.
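 * (SNP relies on the IOMMU to enforce its memory protection on device
 * DMA; a disabled IOMMU or passthrough mode would leave DMA
 * unprotected.)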
3730 */
3731 if (no_iommu || iommu_default_passthrough()) {
3732 pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported\n");
3733 return -EINVAL;
3734 }
3735
3736 /*
3737 * Prevent enabling SNP after the IOMMU_ENABLED state because this
3738 * affects how the IOMMU driver sets up data structures and
3739 * configures the IOMMU hardware.
3740 */
3741 if (init_state > IOMMU_ENABLED) {
3742 pr_err("SNP: Too late to enable SNP for IOMMU.\n");
3743 return -EINVAL;
3744 }
3745
3746 amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
3747 if (!amd_iommu_snp_en)
3748 return -EINVAL;
3749
3750 pr_info("SNP enabled\n");
3751
3752 /* Enforce IOMMU v1 pagetable when SNP is enabled. */
3753 if (amd_iommu_pgtable != AMD_IOMMU_V1) {
3754 pr_warn("Forcing AMD IOMMU v1 page table due to SNP\n");
3755 amd_iommu_pgtable = AMD_IOMMU_V1;
3756 }
3757
3758 return 0;
3759 }
3760 #endif
3761