// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH		48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL		0x20
#define ACPI_IVMD_TYPE			0x21
#define ACPI_IVMD_TYPE_RANGE		0x22

#define IVHD_DEV_ALL			0x01
#define IVHD_DEV_SELECT			0x02
#define IVHD_DEV_SELECT_RANGE_START	0x03
#define IVHD_DEV_RANGE_END		0x04
#define IVHD_DEV_ALIAS			0x42
#define IVHD_DEV_ALIAS_RANGE		0x43
#define IVHD_DEV_EXT_SELECT		0x46
#define IVHD_DEV_EXT_SELECT_RANGE	0x47
#define IVHD_DEV_SPECIAL		0x48
#define IVHD_DEV_ACPI_HID		0xf0

#define UID_NOT_PRESENT			0
#define UID_IS_INTEGER			1
#define UID_IS_CHARACTER		2

#define IVHD_SPECIAL_IOAPIC		1
#define IVHD_SPECIAL_HPET		2

#define IVHD_FLAG_HT_TUN_EN_MASK	0x01
#define IVHD_FLAG_PASSPW_EN_MASK	0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK	0x04
#define IVHD_FLAG_ISOC_EN_MASK		0x08

#define IVMD_FLAG_EXCL_RANGE		0x08
#define IVMD_FLAG_IW			0x04
#define IVMD_FLAG_IR			0x02
#define IVMD_FLAG_UNITY_MAP		0x01

#define ACPI_DEVFLAG_INITPASS		0x01
#define ACPI_DEVFLAG_EXTINT		0x02
#define ACPI_DEVFLAG_NMI		0x04
#define ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

#define LOOP_TIMEOUT			2000000

#define IVRS_GET_SBDF_ID(seg, bus, dev, fn)	(((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
						 | ((dev & 0x1f) << 3) | (fn & 0x7))

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 efr_reg2;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
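 * The size of an entry is encoded in its type byte: types below 0x40 are
 * 4 bytes long, types 0x40-0x7f are 8 bytes long, and IVHD_DEV_ACPI_HID
 * entries are variable length (see ivhd_entry_length() below).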
123 */ 124 struct ivhd_entry { 125 u8 type; 126 u16 devid; 127 u8 flags; 128 struct_group(ext_hid, 129 u32 ext; 130 u32 hidh; 131 ); 132 u64 cid; 133 u8 uidf; 134 u8 uidl; 135 u8 uid; 136 } __attribute__((packed)); 137 138 /* 139 * An AMD IOMMU memory definition structure. It defines things like exclusion 140 * ranges for devices and regions that should be unity mapped. 141 */ 142 struct ivmd_header { 143 u8 type; 144 u8 flags; 145 u16 length; 146 u16 devid; 147 u16 aux; 148 u16 pci_seg; 149 u8 resv[6]; 150 u64 range_start; 151 u64 range_length; 152 } __attribute__((packed)); 153 154 bool amd_iommu_dump; 155 bool amd_iommu_irq_remap __read_mostly; 156 157 enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1; 158 159 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 160 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; 161 162 static bool amd_iommu_detected; 163 static bool amd_iommu_disabled __initdata; 164 static bool amd_iommu_force_enable __initdata; 165 static int amd_iommu_target_ivhd_type; 166 167 /* Global EFR and EFR2 registers */ 168 u64 amd_iommu_efr; 169 u64 amd_iommu_efr2; 170 171 /* SNP is enabled on the system? */ 172 bool amd_iommu_snp_en; 173 EXPORT_SYMBOL(amd_iommu_snp_en); 174 175 LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */ 176 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 177 system */ 178 179 /* Array to assign indices to IOMMUs*/ 180 struct amd_iommu *amd_iommus[MAX_IOMMUS]; 181 182 /* Number of IOMMUs present in the system */ 183 static int amd_iommus_present; 184 185 /* IOMMUs have a non-present cache? */ 186 bool amd_iommu_np_cache __read_mostly; 187 bool amd_iommu_iotlb_sup __read_mostly = true; 188 189 u32 amd_iommu_max_pasid __read_mostly = ~0; 190 191 bool amd_iommu_v2_present __read_mostly; 192 static bool amd_iommu_pc_present __read_mostly; 193 bool amdr_ivrs_remap_support __read_mostly; 194 195 bool amd_iommu_force_isolation __read_mostly; 196 197 /* 198 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap 199 * to know which ones are already in use. 
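 * With one bit per protection domain this bitmap occupies 2^16 bits,
 * i.e. 8 KiB.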
200 */ 201 unsigned long *amd_iommu_pd_alloc_bitmap; 202 203 enum iommu_init_state { 204 IOMMU_START_STATE, 205 IOMMU_IVRS_DETECTED, 206 IOMMU_ACPI_FINISHED, 207 IOMMU_ENABLED, 208 IOMMU_PCI_INIT, 209 IOMMU_INTERRUPTS_EN, 210 IOMMU_INITIALIZED, 211 IOMMU_NOT_FOUND, 212 IOMMU_INIT_ERROR, 213 IOMMU_CMDLINE_DISABLED, 214 }; 215 216 /* Early ioapic and hpet maps from kernel command line */ 217 #define EARLY_MAP_SIZE 4 218 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; 219 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; 220 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE]; 221 222 static int __initdata early_ioapic_map_size; 223 static int __initdata early_hpet_map_size; 224 static int __initdata early_acpihid_map_size; 225 226 static bool __initdata cmdline_maps; 227 228 static enum iommu_init_state init_state = IOMMU_START_STATE; 229 230 static int amd_iommu_enable_interrupts(void); 231 static int __init iommu_go_to_state(enum iommu_init_state state); 232 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg); 233 234 static bool amd_iommu_pre_enabled = true; 235 236 static u32 amd_iommu_ivinfo __initdata; 237 238 bool translation_pre_enabled(struct amd_iommu *iommu) 239 { 240 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); 241 } 242 243 static void clear_translation_pre_enabled(struct amd_iommu *iommu) 244 { 245 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 246 } 247 248 static void init_translation_status(struct amd_iommu *iommu) 249 { 250 u64 ctrl; 251 252 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 253 if (ctrl & (1<<CONTROL_IOMMU_EN)) 254 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 255 } 256 257 static inline unsigned long tbl_size(int entry_size, int last_bdf) 258 { 259 unsigned shift = PAGE_SHIFT + 260 get_order((last_bdf + 1) * entry_size); 261 262 return 1UL << shift; 263 } 264 265 int amd_iommu_get_num_iommus(void) 266 { 267 return amd_iommus_present; 268 } 269 270 /* 271 * Iterate through all the IOMMUs to get common EFR 272 * masks among all IOMMUs and warn if found inconsistency. 273 */ 274 static void get_global_efr(void) 275 { 276 struct amd_iommu *iommu; 277 278 for_each_iommu(iommu) { 279 u64 tmp = iommu->features; 280 u64 tmp2 = iommu->features2; 281 282 if (list_is_first(&iommu->list, &amd_iommu_list)) { 283 amd_iommu_efr = tmp; 284 amd_iommu_efr2 = tmp2; 285 continue; 286 } 287 288 if (amd_iommu_efr == tmp && 289 amd_iommu_efr2 == tmp2) 290 continue; 291 292 pr_err(FW_BUG 293 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", 294 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2, 295 iommu->index, iommu->pci_seg->id, 296 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), 297 PCI_FUNC(iommu->devid)); 298 299 amd_iommu_efr &= tmp; 300 amd_iommu_efr2 &= tmp2; 301 } 302 303 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2); 304 } 305 306 static bool check_feature_on_all_iommus(u64 mask) 307 { 308 return !!(amd_iommu_efr & mask); 309 } 310 311 /* 312 * For IVHD type 0x11/0x40, EFR is also available via IVHD. 313 * Default to IVHD EFR since it is available sooner 314 * (i.e. before PCI init). 
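 * The MMIO copy of the EFR read later in late_iommu_features_init() is
 * cross-checked against these values and a firmware warning is printed
 * on mismatch.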
315 */ 316 static void __init early_iommu_features_init(struct amd_iommu *iommu, 317 struct ivhd_header *h) 318 { 319 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) { 320 iommu->features = h->efr_reg; 321 iommu->features2 = h->efr_reg2; 322 } 323 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP) 324 amdr_ivrs_remap_support = true; 325 } 326 327 /* Access to l1 and l2 indexed register spaces */ 328 329 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) 330 { 331 u32 val; 332 333 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 334 pci_read_config_dword(iommu->dev, 0xfc, &val); 335 return val; 336 } 337 338 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) 339 { 340 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); 341 pci_write_config_dword(iommu->dev, 0xfc, val); 342 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 343 } 344 345 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) 346 { 347 u32 val; 348 349 pci_write_config_dword(iommu->dev, 0xf0, address); 350 pci_read_config_dword(iommu->dev, 0xf4, &val); 351 return val; 352 } 353 354 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) 355 { 356 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); 357 pci_write_config_dword(iommu->dev, 0xf4, val); 358 } 359 360 /**************************************************************************** 361 * 362 * AMD IOMMU MMIO register space handling functions 363 * 364 * These functions are used to program the IOMMU device registers in 365 * MMIO space required for that driver. 366 * 367 ****************************************************************************/ 368 369 /* 370 * This function set the exclusion range in the IOMMU. DMA accesses to the 371 * exclusion range are passed through untranslated 372 */ 373 static void iommu_set_exclusion_range(struct amd_iommu *iommu) 374 { 375 u64 start = iommu->exclusion_start & PAGE_MASK; 376 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; 377 u64 entry; 378 379 if (!iommu->exclusion_start) 380 return; 381 382 entry = start | MMIO_EXCL_ENABLE_MASK; 383 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 384 &entry, sizeof(entry)); 385 386 entry = limit; 387 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 388 &entry, sizeof(entry)); 389 } 390 391 static void iommu_set_cwwb_range(struct amd_iommu *iommu) 392 { 393 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); 394 u64 entry = start & PM_ADDR_MASK; 395 396 if (!check_feature_on_all_iommus(FEATURE_SNP)) 397 return; 398 399 /* Note: 400 * Re-purpose Exclusion base/limit registers for Completion wait 401 * write-back base/limit. 402 */ 403 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 404 &entry, sizeof(entry)); 405 406 /* Note: 407 * Default to 4 Kbytes, which can be specified by setting base 408 * address equal to the limit address. 
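 * (The same value that was written to the base register above is written
 * to the limit register below.)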
409 */ 410 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 411 &entry, sizeof(entry)); 412 } 413 414 /* Programs the physical address of the device table into the IOMMU hardware */ 415 static void iommu_set_device_table(struct amd_iommu *iommu) 416 { 417 u64 entry; 418 u32 dev_table_size = iommu->pci_seg->dev_table_size; 419 void *dev_table = (void *)get_dev_table(iommu); 420 421 BUG_ON(iommu->mmio_base == NULL); 422 423 entry = iommu_virt_to_phys(dev_table); 424 entry |= (dev_table_size >> 12) - 1; 425 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, 426 &entry, sizeof(entry)); 427 } 428 429 /* Generic functions to enable/disable certain features of the IOMMU. */ 430 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) 431 { 432 u64 ctrl; 433 434 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 435 ctrl |= (1ULL << bit); 436 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 437 } 438 439 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) 440 { 441 u64 ctrl; 442 443 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 444 ctrl &= ~(1ULL << bit); 445 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 446 } 447 448 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) 449 { 450 u64 ctrl; 451 452 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 453 ctrl &= ~CTRL_INV_TO_MASK; 454 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; 455 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 456 } 457 458 /* Function to enable the hardware */ 459 static void iommu_enable(struct amd_iommu *iommu) 460 { 461 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 462 } 463 464 static void iommu_disable(struct amd_iommu *iommu) 465 { 466 if (!iommu->mmio_base) 467 return; 468 469 /* Disable command buffer */ 470 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 471 472 /* Disable event logging and event interrupts */ 473 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); 474 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 475 476 /* Disable IOMMU GA_LOG */ 477 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 478 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 479 480 /* Disable IOMMU hardware itself */ 481 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 482 } 483 484 /* 485 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 486 * the system has one. 487 */ 488 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) 489 { 490 if (!request_mem_region(address, end, "amd_iommu")) { 491 pr_err("Can not reserve memory region %llx-%llx for mmio\n", 492 address, end); 493 pr_err("This is a BIOS bug. Please contact your hardware vendor\n"); 494 return NULL; 495 } 496 497 return (u8 __iomem *)ioremap(address, end); 498 } 499 500 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) 501 { 502 if (iommu->mmio_base) 503 iounmap(iommu->mmio_base); 504 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); 505 } 506 507 static inline u32 get_ivhd_header_size(struct ivhd_header *h) 508 { 509 u32 size = 0; 510 511 switch (h->type) { 512 case 0x10: 513 size = 24; 514 break; 515 case 0x11: 516 case 0x40: 517 size = 40; 518 break; 519 } 520 return size; 521 } 522 523 /**************************************************************************** 524 * 525 * The functions below belong to the first pass of AMD IOMMU ACPI table 526 * parsing. In this pass we try to find out the highest device id this 527 * code has to handle. 
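 * (For example, a DEV_ALL entry makes find_last_devid_from_ivhd() report
 * the maximum BDF value of 0xffff for the segment.)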
Upon this information the size of the shared data 528 * structures is determined later. 529 * 530 ****************************************************************************/ 531 532 /* 533 * This function calculates the length of a given IVHD entry 534 */ 535 static inline int ivhd_entry_length(u8 *ivhd) 536 { 537 u32 type = ((struct ivhd_entry *)ivhd)->type; 538 539 if (type < 0x80) { 540 return 0x04 << (*ivhd >> 6); 541 } else if (type == IVHD_DEV_ACPI_HID) { 542 /* For ACPI_HID, offset 21 is uid len */ 543 return *((u8 *)ivhd + 21) + 22; 544 } 545 return 0; 546 } 547 548 /* 549 * After reading the highest device id from the IOMMU PCI capability header 550 * this function looks if there is a higher device id defined in the ACPI table 551 */ 552 static int __init find_last_devid_from_ivhd(struct ivhd_header *h) 553 { 554 u8 *p = (void *)h, *end = (void *)h; 555 struct ivhd_entry *dev; 556 int last_devid = -EINVAL; 557 558 u32 ivhd_size = get_ivhd_header_size(h); 559 560 if (!ivhd_size) { 561 pr_err("Unsupported IVHD type %#x\n", h->type); 562 return -EINVAL; 563 } 564 565 p += ivhd_size; 566 end += h->length; 567 568 while (p < end) { 569 dev = (struct ivhd_entry *)p; 570 switch (dev->type) { 571 case IVHD_DEV_ALL: 572 /* Use maximum BDF value for DEV_ALL */ 573 return 0xffff; 574 case IVHD_DEV_SELECT: 575 case IVHD_DEV_RANGE_END: 576 case IVHD_DEV_ALIAS: 577 case IVHD_DEV_EXT_SELECT: 578 /* all the above subfield types refer to device ids */ 579 if (dev->devid > last_devid) 580 last_devid = dev->devid; 581 break; 582 default: 583 break; 584 } 585 p += ivhd_entry_length(p); 586 } 587 588 WARN_ON(p != end); 589 590 return last_devid; 591 } 592 593 static int __init check_ivrs_checksum(struct acpi_table_header *table) 594 { 595 int i; 596 u8 checksum = 0, *p = (u8 *)table; 597 598 for (i = 0; i < table->length; ++i) 599 checksum += p[i]; 600 if (checksum != 0) { 601 /* ACPI table corrupt */ 602 pr_err(FW_BUG "IVRS invalid checksum\n"); 603 return -ENODEV; 604 } 605 606 return 0; 607 } 608 609 /* 610 * Iterate over all IVHD entries in the ACPI table and find the highest device 611 * id which we need to handle. This is the first of three functions which parse 612 * the ACPI table. So we check the checksum here. 613 */ 614 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) 615 { 616 u8 *p = (u8 *)table, *end = (u8 *)table; 617 struct ivhd_header *h; 618 int last_devid, last_bdf = 0; 619 620 p += IVRS_HEADER_LENGTH; 621 622 end += table->length; 623 while (p < end) { 624 h = (struct ivhd_header *)p; 625 if (h->pci_seg == pci_seg && 626 h->type == amd_iommu_target_ivhd_type) { 627 last_devid = find_last_devid_from_ivhd(h); 628 629 if (last_devid < 0) 630 return -EINVAL; 631 if (last_devid > last_bdf) 632 last_bdf = last_devid; 633 } 634 p += h->length; 635 } 636 WARN_ON(p != end); 637 638 return last_bdf; 639 } 640 641 /**************************************************************************** 642 * 643 * The following functions belong to the code path which parses the ACPI table 644 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 645 * data structures, initialize the per PCI segment device/alias/rlookup table 646 * and also basically initialize the hardware. 
647 * 648 ****************************************************************************/ 649 650 /* Allocate per PCI segment device table */ 651 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) 652 { 653 pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, 654 get_order(pci_seg->dev_table_size)); 655 if (!pci_seg->dev_table) 656 return -ENOMEM; 657 658 return 0; 659 } 660 661 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) 662 { 663 free_pages((unsigned long)pci_seg->dev_table, 664 get_order(pci_seg->dev_table_size)); 665 pci_seg->dev_table = NULL; 666 } 667 668 /* Allocate per PCI segment IOMMU rlookup table. */ 669 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 670 { 671 pci_seg->rlookup_table = (void *)__get_free_pages( 672 GFP_KERNEL | __GFP_ZERO, 673 get_order(pci_seg->rlookup_table_size)); 674 if (pci_seg->rlookup_table == NULL) 675 return -ENOMEM; 676 677 return 0; 678 } 679 680 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) 681 { 682 free_pages((unsigned long)pci_seg->rlookup_table, 683 get_order(pci_seg->rlookup_table_size)); 684 pci_seg->rlookup_table = NULL; 685 } 686 687 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 688 { 689 pci_seg->irq_lookup_table = (void *)__get_free_pages( 690 GFP_KERNEL | __GFP_ZERO, 691 get_order(pci_seg->rlookup_table_size)); 692 kmemleak_alloc(pci_seg->irq_lookup_table, 693 pci_seg->rlookup_table_size, 1, GFP_KERNEL); 694 if (pci_seg->irq_lookup_table == NULL) 695 return -ENOMEM; 696 697 return 0; 698 } 699 700 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) 701 { 702 kmemleak_free(pci_seg->irq_lookup_table); 703 free_pages((unsigned long)pci_seg->irq_lookup_table, 704 get_order(pci_seg->rlookup_table_size)); 705 pci_seg->irq_lookup_table = NULL; 706 } 707 708 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) 709 { 710 int i; 711 712 pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL, 713 get_order(pci_seg->alias_table_size)); 714 if (!pci_seg->alias_table) 715 return -ENOMEM; 716 717 /* 718 * let all alias entries point to itself 719 */ 720 for (i = 0; i <= pci_seg->last_bdf; ++i) 721 pci_seg->alias_table[i] = i; 722 723 return 0; 724 } 725 726 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) 727 { 728 free_pages((unsigned long)pci_seg->alias_table, 729 get_order(pci_seg->alias_table_size)); 730 pci_seg->alias_table = NULL; 731 } 732 733 /* 734 * Allocates the command buffer. This buffer is per AMD IOMMU. We can 735 * write commands to that buffer later and the IOMMU will execute them 736 * asynchronously 737 */ 738 static int __init alloc_command_buffer(struct amd_iommu *iommu) 739 { 740 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 741 get_order(CMD_BUFFER_SIZE)); 742 743 return iommu->cmd_buf ? 0 : -ENOMEM; 744 } 745 746 /* 747 * This function restarts event logging in case the IOMMU experienced 748 * an event log buffer overflow. 749 */ 750 void amd_iommu_restart_event_logging(struct amd_iommu *iommu) 751 { 752 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 753 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); 754 } 755 756 /* 757 * This function resets the command buffer if the IOMMU stopped fetching 758 * commands from it. 
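 * It clears the MMIO head and tail pointers and toggles CMDBUF_EN, so any
 * commands still queued in the buffer are discarded.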
 */
static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (buf &&
	    check_feature_on_all_iommus(FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		buf = NULL;
	}

	return buf;
}

/* allocates the memory where the IOMMU will log its events */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      EVT_BUFFER_SIZE);

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests (PPR) */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);

	return iommu->ppr_log ?
0 : -ENOMEM; 867 } 868 869 static void iommu_enable_ppr_log(struct amd_iommu *iommu) 870 { 871 u64 entry; 872 873 if (iommu->ppr_log == NULL) 874 return; 875 876 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; 877 878 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, 879 &entry, sizeof(entry)); 880 881 /* set head and tail to zero manually */ 882 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 883 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 884 885 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); 886 iommu_feature_enable(iommu, CONTROL_PPR_EN); 887 } 888 889 static void __init free_ppr_log(struct amd_iommu *iommu) 890 { 891 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); 892 } 893 894 static void free_ga_log(struct amd_iommu *iommu) 895 { 896 #ifdef CONFIG_IRQ_REMAP 897 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); 898 free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); 899 #endif 900 } 901 902 static int iommu_ga_log_enable(struct amd_iommu *iommu) 903 { 904 #ifdef CONFIG_IRQ_REMAP 905 u32 status, i; 906 u64 entry; 907 908 if (!iommu->ga_log) 909 return -EINVAL; 910 911 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 912 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 913 &entry, sizeof(entry)); 914 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & 915 (BIT_ULL(52)-1)) & ~7ULL; 916 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 917 &entry, sizeof(entry)); 918 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 919 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); 920 921 922 iommu_feature_enable(iommu, CONTROL_GAINT_EN); 923 iommu_feature_enable(iommu, CONTROL_GALOG_EN); 924 925 for (i = 0; i < LOOP_TIMEOUT; ++i) { 926 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 927 if (status & (MMIO_STATUS_GALOG_RUN_MASK)) 928 break; 929 udelay(10); 930 } 931 932 if (WARN_ON(i >= LOOP_TIMEOUT)) 933 return -EINVAL; 934 #endif /* CONFIG_IRQ_REMAP */ 935 return 0; 936 } 937 938 static int iommu_init_ga_log(struct amd_iommu *iommu) 939 { 940 #ifdef CONFIG_IRQ_REMAP 941 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 942 return 0; 943 944 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 945 get_order(GA_LOG_SIZE)); 946 if (!iommu->ga_log) 947 goto err_out; 948 949 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 950 get_order(8)); 951 if (!iommu->ga_log_tail) 952 goto err_out; 953 954 return 0; 955 err_out: 956 free_ga_log(iommu); 957 return -EINVAL; 958 #else 959 return 0; 960 #endif /* CONFIG_IRQ_REMAP */ 961 } 962 963 static int __init alloc_cwwb_sem(struct amd_iommu *iommu) 964 { 965 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1); 966 967 return iommu->cmd_sem ? 0 : -ENOMEM; 968 } 969 970 static void __init free_cwwb_sem(struct amd_iommu *iommu) 971 { 972 if (iommu->cmd_sem) 973 free_page((unsigned long)iommu->cmd_sem); 974 } 975 976 static void iommu_enable_xt(struct amd_iommu *iommu) 977 { 978 #ifdef CONFIG_IRQ_REMAP 979 /* 980 * XT mode (32-bit APIC destination ID) requires 981 * GA mode (128-bit IRTE support) as a prerequisite. 
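 * Only the 128-bit IRTE format has room for a full 32-bit destination ID,
 * which is why XT is only enabled here when GA mode is in use.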
982 */ 983 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && 984 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 985 iommu_feature_enable(iommu, CONTROL_XT_EN); 986 #endif /* CONFIG_IRQ_REMAP */ 987 } 988 989 static void iommu_enable_gt(struct amd_iommu *iommu) 990 { 991 if (!iommu_feature(iommu, FEATURE_GT)) 992 return; 993 994 iommu_feature_enable(iommu, CONTROL_GT_EN); 995 } 996 997 /* sets a specific bit in the device table entry. */ 998 static void __set_dev_entry_bit(struct dev_table_entry *dev_table, 999 u16 devid, u8 bit) 1000 { 1001 int i = (bit >> 6) & 0x03; 1002 int _bit = bit & 0x3f; 1003 1004 dev_table[devid].data[i] |= (1UL << _bit); 1005 } 1006 1007 static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) 1008 { 1009 struct dev_table_entry *dev_table = get_dev_table(iommu); 1010 1011 return __set_dev_entry_bit(dev_table, devid, bit); 1012 } 1013 1014 static int __get_dev_entry_bit(struct dev_table_entry *dev_table, 1015 u16 devid, u8 bit) 1016 { 1017 int i = (bit >> 6) & 0x03; 1018 int _bit = bit & 0x3f; 1019 1020 return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit; 1021 } 1022 1023 static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit) 1024 { 1025 struct dev_table_entry *dev_table = get_dev_table(iommu); 1026 1027 return __get_dev_entry_bit(dev_table, devid, bit); 1028 } 1029 1030 static bool __copy_device_table(struct amd_iommu *iommu) 1031 { 1032 u64 int_ctl, int_tab_len, entry = 0; 1033 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1034 struct dev_table_entry *old_devtb = NULL; 1035 u32 lo, hi, devid, old_devtb_size; 1036 phys_addr_t old_devtb_phys; 1037 u16 dom_id, dte_v, irq_v; 1038 gfp_t gfp_flag; 1039 u64 tmp; 1040 1041 /* Each IOMMU use separate device table with the same size */ 1042 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); 1043 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); 1044 entry = (((u64) hi) << 32) + lo; 1045 1046 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; 1047 if (old_devtb_size != pci_seg->dev_table_size) { 1048 pr_err("The device table size of IOMMU:%d is not expected!\n", 1049 iommu->index); 1050 return false; 1051 } 1052 1053 /* 1054 * When SME is enabled in the first kernel, the entry includes the 1055 * memory encryption mask(sme_me_mask), we must remove the memory 1056 * encryption mask to obtain the true physical address in kdump kernel. 1057 */ 1058 old_devtb_phys = __sme_clr(entry) & PAGE_MASK; 1059 1060 if (old_devtb_phys >= 0x100000000ULL) { 1061 pr_err("The address of old device table is above 4G, not trustworthy!\n"); 1062 return false; 1063 } 1064 old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel()) 1065 ? 
(__force void *)ioremap_encrypted(old_devtb_phys, 1066 pci_seg->dev_table_size) 1067 : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB); 1068 1069 if (!old_devtb) 1070 return false; 1071 1072 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32; 1073 pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, 1074 get_order(pci_seg->dev_table_size)); 1075 if (pci_seg->old_dev_tbl_cpy == NULL) { 1076 pr_err("Failed to allocate memory for copying old device table!\n"); 1077 memunmap(old_devtb); 1078 return false; 1079 } 1080 1081 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 1082 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid]; 1083 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; 1084 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; 1085 1086 if (dte_v && dom_id) { 1087 pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; 1088 pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; 1089 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); 1090 /* If gcr3 table existed, mask it out */ 1091 if (old_devtb[devid].data[0] & DTE_FLAG_GV) { 1092 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; 1093 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; 1094 pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp; 1095 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A; 1096 tmp |= DTE_FLAG_GV; 1097 pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp; 1098 } 1099 } 1100 1101 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; 1102 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; 1103 int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK; 1104 if (irq_v && (int_ctl || int_tab_len)) { 1105 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || 1106 (int_tab_len != DTE_INTTABLEN)) { 1107 pr_err("Wrong old irq remapping flag: %#x\n", devid); 1108 memunmap(old_devtb); 1109 return false; 1110 } 1111 1112 pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; 1113 } 1114 } 1115 memunmap(old_devtb); 1116 1117 return true; 1118 } 1119 1120 static bool copy_device_table(void) 1121 { 1122 struct amd_iommu *iommu; 1123 struct amd_iommu_pci_seg *pci_seg; 1124 1125 if (!amd_iommu_pre_enabled) 1126 return false; 1127 1128 pr_warn("Translation is already enabled - trying to copy translation structures\n"); 1129 1130 /* 1131 * All IOMMUs within PCI segment shares common device table. 1132 * Hence copy device table only once per PCI segment. 
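 * The loop below therefore picks the first IOMMU that belongs to each
 * segment and copies that segment's table exactly once.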
1133 */ 1134 for_each_pci_segment(pci_seg) { 1135 for_each_iommu(iommu) { 1136 if (pci_seg->id != iommu->pci_seg->id) 1137 continue; 1138 if (!__copy_device_table(iommu)) 1139 return false; 1140 break; 1141 } 1142 } 1143 1144 return true; 1145 } 1146 1147 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid) 1148 { 1149 int sysmgt; 1150 1151 sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) | 1152 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1); 1153 1154 if (sysmgt == 0x01) 1155 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW); 1156 } 1157 1158 /* 1159 * This function takes the device specific flags read from the ACPI 1160 * table and sets up the device table entry with that information 1161 */ 1162 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, 1163 u16 devid, u32 flags, u32 ext_flags) 1164 { 1165 if (flags & ACPI_DEVFLAG_INITPASS) 1166 set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS); 1167 if (flags & ACPI_DEVFLAG_EXTINT) 1168 set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS); 1169 if (flags & ACPI_DEVFLAG_NMI) 1170 set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS); 1171 if (flags & ACPI_DEVFLAG_SYSMGT1) 1172 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1); 1173 if (flags & ACPI_DEVFLAG_SYSMGT2) 1174 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2); 1175 if (flags & ACPI_DEVFLAG_LINT0) 1176 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS); 1177 if (flags & ACPI_DEVFLAG_LINT1) 1178 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS); 1179 1180 amd_iommu_apply_erratum_63(iommu, devid); 1181 1182 amd_iommu_set_rlookup_table(iommu, devid); 1183 } 1184 1185 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line) 1186 { 1187 struct devid_map *entry; 1188 struct list_head *list; 1189 1190 if (type == IVHD_SPECIAL_IOAPIC) 1191 list = &ioapic_map; 1192 else if (type == IVHD_SPECIAL_HPET) 1193 list = &hpet_map; 1194 else 1195 return -EINVAL; 1196 1197 list_for_each_entry(entry, list, list) { 1198 if (!(entry->id == id && entry->cmd_line)) 1199 continue; 1200 1201 pr_info("Command-line override present for %s id %d - ignoring\n", 1202 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); 1203 1204 *devid = entry->devid; 1205 1206 return 0; 1207 } 1208 1209 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1210 if (!entry) 1211 return -ENOMEM; 1212 1213 entry->id = id; 1214 entry->devid = *devid; 1215 entry->cmd_line = cmd_line; 1216 1217 list_add_tail(&entry->list, list); 1218 1219 return 0; 1220 } 1221 1222 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid, 1223 bool cmd_line) 1224 { 1225 struct acpihid_map_entry *entry; 1226 struct list_head *list = &acpihid_map; 1227 1228 list_for_each_entry(entry, list, list) { 1229 if (strcmp(entry->hid, hid) || 1230 (*uid && *entry->uid && strcmp(entry->uid, uid)) || 1231 !entry->cmd_line) 1232 continue; 1233 1234 pr_info("Command-line override for hid:%s uid:%s\n", 1235 hid, uid); 1236 *devid = entry->devid; 1237 return 0; 1238 } 1239 1240 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1241 if (!entry) 1242 return -ENOMEM; 1243 1244 memcpy(entry->uid, uid, strlen(uid)); 1245 memcpy(entry->hid, hid, strlen(hid)); 1246 entry->devid = *devid; 1247 entry->cmd_line = cmd_line; 1248 entry->root_devid = (entry->devid & (~0x7)); 1249 1250 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n", 1251 entry->cmd_line ? 
"cmd" : "ivrs", 1252 entry->hid, entry->uid, entry->root_devid); 1253 1254 list_add_tail(&entry->list, list); 1255 return 0; 1256 } 1257 1258 static int __init add_early_maps(void) 1259 { 1260 int i, ret; 1261 1262 for (i = 0; i < early_ioapic_map_size; ++i) { 1263 ret = add_special_device(IVHD_SPECIAL_IOAPIC, 1264 early_ioapic_map[i].id, 1265 &early_ioapic_map[i].devid, 1266 early_ioapic_map[i].cmd_line); 1267 if (ret) 1268 return ret; 1269 } 1270 1271 for (i = 0; i < early_hpet_map_size; ++i) { 1272 ret = add_special_device(IVHD_SPECIAL_HPET, 1273 early_hpet_map[i].id, 1274 &early_hpet_map[i].devid, 1275 early_hpet_map[i].cmd_line); 1276 if (ret) 1277 return ret; 1278 } 1279 1280 for (i = 0; i < early_acpihid_map_size; ++i) { 1281 ret = add_acpi_hid_device(early_acpihid_map[i].hid, 1282 early_acpihid_map[i].uid, 1283 &early_acpihid_map[i].devid, 1284 early_acpihid_map[i].cmd_line); 1285 if (ret) 1286 return ret; 1287 } 1288 1289 return 0; 1290 } 1291 1292 /* 1293 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 1294 * initializes the hardware and our data structures with it. 1295 */ 1296 static int __init init_iommu_from_acpi(struct amd_iommu *iommu, 1297 struct ivhd_header *h) 1298 { 1299 u8 *p = (u8 *)h; 1300 u8 *end = p, flags = 0; 1301 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id; 1302 u32 dev_i, ext_flags = 0; 1303 bool alias = false; 1304 struct ivhd_entry *e; 1305 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; 1306 u32 ivhd_size; 1307 int ret; 1308 1309 1310 ret = add_early_maps(); 1311 if (ret) 1312 return ret; 1313 1314 amd_iommu_apply_ivrs_quirks(); 1315 1316 /* 1317 * First save the recommended feature enable bits from ACPI 1318 */ 1319 iommu->acpi_flags = h->flags; 1320 1321 /* 1322 * Done. Now parse the device entries 1323 */ 1324 ivhd_size = get_ivhd_header_size(h); 1325 if (!ivhd_size) { 1326 pr_err("Unsupported IVHD type %#x\n", h->type); 1327 return -EINVAL; 1328 } 1329 1330 p += ivhd_size; 1331 1332 end += h->length; 1333 1334 1335 while (p < end) { 1336 e = (struct ivhd_entry *)p; 1337 seg_id = pci_seg->id; 1338 1339 switch (e->type) { 1340 case IVHD_DEV_ALL: 1341 1342 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); 1343 1344 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) 1345 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); 1346 break; 1347 case IVHD_DEV_SELECT: 1348 1349 DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x " 1350 "flags: %02x\n", 1351 seg_id, PCI_BUS_NUM(e->devid), 1352 PCI_SLOT(e->devid), 1353 PCI_FUNC(e->devid), 1354 e->flags); 1355 1356 devid = e->devid; 1357 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1358 break; 1359 case IVHD_DEV_SELECT_RANGE_START: 1360 1361 DUMP_printk(" DEV_SELECT_RANGE_START\t " 1362 "devid: %04x:%02x:%02x.%x flags: %02x\n", 1363 seg_id, PCI_BUS_NUM(e->devid), 1364 PCI_SLOT(e->devid), 1365 PCI_FUNC(e->devid), 1366 e->flags); 1367 1368 devid_start = e->devid; 1369 flags = e->flags; 1370 ext_flags = 0; 1371 alias = false; 1372 break; 1373 case IVHD_DEV_ALIAS: 1374 1375 DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x " 1376 "flags: %02x devid_to: %02x:%02x.%x\n", 1377 seg_id, PCI_BUS_NUM(e->devid), 1378 PCI_SLOT(e->devid), 1379 PCI_FUNC(e->devid), 1380 e->flags, 1381 PCI_BUS_NUM(e->ext >> 8), 1382 PCI_SLOT(e->ext >> 8), 1383 PCI_FUNC(e->ext >> 8)); 1384 1385 devid = e->devid; 1386 devid_to = e->ext >> 8; 1387 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); 1388 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); 1389 pci_seg->alias_table[devid] = devid_to; 1390 
break; 1391 case IVHD_DEV_ALIAS_RANGE: 1392 1393 DUMP_printk(" DEV_ALIAS_RANGE\t\t " 1394 "devid: %04x:%02x:%02x.%x flags: %02x " 1395 "devid_to: %04x:%02x:%02x.%x\n", 1396 seg_id, PCI_BUS_NUM(e->devid), 1397 PCI_SLOT(e->devid), 1398 PCI_FUNC(e->devid), 1399 e->flags, 1400 seg_id, PCI_BUS_NUM(e->ext >> 8), 1401 PCI_SLOT(e->ext >> 8), 1402 PCI_FUNC(e->ext >> 8)); 1403 1404 devid_start = e->devid; 1405 flags = e->flags; 1406 devid_to = e->ext >> 8; 1407 ext_flags = 0; 1408 alias = true; 1409 break; 1410 case IVHD_DEV_EXT_SELECT: 1411 1412 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x " 1413 "flags: %02x ext: %08x\n", 1414 seg_id, PCI_BUS_NUM(e->devid), 1415 PCI_SLOT(e->devid), 1416 PCI_FUNC(e->devid), 1417 e->flags, e->ext); 1418 1419 devid = e->devid; 1420 set_dev_entry_from_acpi(iommu, devid, e->flags, 1421 e->ext); 1422 break; 1423 case IVHD_DEV_EXT_SELECT_RANGE: 1424 1425 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " 1426 "%04x:%02x:%02x.%x flags: %02x ext: %08x\n", 1427 seg_id, PCI_BUS_NUM(e->devid), 1428 PCI_SLOT(e->devid), 1429 PCI_FUNC(e->devid), 1430 e->flags, e->ext); 1431 1432 devid_start = e->devid; 1433 flags = e->flags; 1434 ext_flags = e->ext; 1435 alias = false; 1436 break; 1437 case IVHD_DEV_RANGE_END: 1438 1439 DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n", 1440 seg_id, PCI_BUS_NUM(e->devid), 1441 PCI_SLOT(e->devid), 1442 PCI_FUNC(e->devid)); 1443 1444 devid = e->devid; 1445 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 1446 if (alias) { 1447 pci_seg->alias_table[dev_i] = devid_to; 1448 set_dev_entry_from_acpi(iommu, 1449 devid_to, flags, ext_flags); 1450 } 1451 set_dev_entry_from_acpi(iommu, dev_i, 1452 flags, ext_flags); 1453 } 1454 break; 1455 case IVHD_DEV_SPECIAL: { 1456 u8 handle, type; 1457 const char *var; 1458 u32 devid; 1459 int ret; 1460 1461 handle = e->ext & 0xff; 1462 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); 1463 type = (e->ext >> 24) & 0xff; 1464 1465 if (type == IVHD_SPECIAL_IOAPIC) 1466 var = "IOAPIC"; 1467 else if (type == IVHD_SPECIAL_HPET) 1468 var = "HPET"; 1469 else 1470 var = "UNKNOWN"; 1471 1472 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n", 1473 var, (int)handle, 1474 seg_id, PCI_BUS_NUM(devid), 1475 PCI_SLOT(devid), 1476 PCI_FUNC(devid)); 1477 1478 ret = add_special_device(type, handle, &devid, false); 1479 if (ret) 1480 return ret; 1481 1482 /* 1483 * add_special_device might update the devid in case a 1484 * command-line override is present. So call 1485 * set_dev_entry_from_acpi after add_special_device. 
1486 */ 1487 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1488 1489 break; 1490 } 1491 case IVHD_DEV_ACPI_HID: { 1492 u32 devid; 1493 u8 hid[ACPIHID_HID_LEN]; 1494 u8 uid[ACPIHID_UID_LEN]; 1495 int ret; 1496 1497 if (h->type != 0x40) { 1498 pr_err(FW_BUG "Invalid IVHD device type %#x\n", 1499 e->type); 1500 break; 1501 } 1502 1503 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); 1504 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); 1505 hid[ACPIHID_HID_LEN - 1] = '\0'; 1506 1507 if (!(*hid)) { 1508 pr_err(FW_BUG "Invalid HID.\n"); 1509 break; 1510 } 1511 1512 uid[0] = '\0'; 1513 switch (e->uidf) { 1514 case UID_NOT_PRESENT: 1515 1516 if (e->uidl != 0) 1517 pr_warn(FW_BUG "Invalid UID length.\n"); 1518 1519 break; 1520 case UID_IS_INTEGER: 1521 1522 sprintf(uid, "%d", e->uid); 1523 1524 break; 1525 case UID_IS_CHARACTER: 1526 1527 memcpy(uid, &e->uid, e->uidl); 1528 uid[e->uidl] = '\0'; 1529 1530 break; 1531 default: 1532 break; 1533 } 1534 1535 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); 1536 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n", 1537 hid, uid, seg_id, 1538 PCI_BUS_NUM(devid), 1539 PCI_SLOT(devid), 1540 PCI_FUNC(devid)); 1541 1542 flags = e->flags; 1543 1544 ret = add_acpi_hid_device(hid, uid, &devid, false); 1545 if (ret) 1546 return ret; 1547 1548 /* 1549 * add_special_device might update the devid in case a 1550 * command-line override is present. So call 1551 * set_dev_entry_from_acpi after add_special_device. 1552 */ 1553 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1554 1555 break; 1556 } 1557 default: 1558 break; 1559 } 1560 1561 p += ivhd_entry_length(p); 1562 } 1563 1564 return 0; 1565 } 1566 1567 /* Allocate PCI segment data structure */ 1568 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, 1569 struct acpi_table_header *ivrs_base) 1570 { 1571 struct amd_iommu_pci_seg *pci_seg; 1572 int last_bdf; 1573 1574 /* 1575 * First parse ACPI tables to find the largest Bus/Dev/Func we need to 1576 * handle in this PCI segment. Upon this information the shared data 1577 * structures for the PCI segments in the system will be allocated. 
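 * (For example, a segment containing a DEV_ALL entry reports a last_bdf of
 * 0xffff, so the device/alias/rlookup tables below are sized for all 65536
 * possible device ids in that segment.)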
1578 */ 1579 last_bdf = find_last_devid_acpi(ivrs_base, id); 1580 if (last_bdf < 0) 1581 return NULL; 1582 1583 pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL); 1584 if (pci_seg == NULL) 1585 return NULL; 1586 1587 pci_seg->last_bdf = last_bdf; 1588 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf); 1589 pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); 1590 pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); 1591 pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); 1592 1593 pci_seg->id = id; 1594 init_llist_head(&pci_seg->dev_data_list); 1595 INIT_LIST_HEAD(&pci_seg->unity_map); 1596 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); 1597 1598 if (alloc_dev_table(pci_seg)) 1599 return NULL; 1600 if (alloc_alias_table(pci_seg)) 1601 return NULL; 1602 if (alloc_rlookup_table(pci_seg)) 1603 return NULL; 1604 1605 return pci_seg; 1606 } 1607 1608 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id, 1609 struct acpi_table_header *ivrs_base) 1610 { 1611 struct amd_iommu_pci_seg *pci_seg; 1612 1613 for_each_pci_segment(pci_seg) { 1614 if (pci_seg->id == id) 1615 return pci_seg; 1616 } 1617 1618 return alloc_pci_segment(id, ivrs_base); 1619 } 1620 1621 static void __init free_pci_segments(void) 1622 { 1623 struct amd_iommu_pci_seg *pci_seg, *next; 1624 1625 for_each_pci_segment_safe(pci_seg, next) { 1626 list_del(&pci_seg->list); 1627 free_irq_lookup_table(pci_seg); 1628 free_rlookup_table(pci_seg); 1629 free_alias_table(pci_seg); 1630 free_dev_table(pci_seg); 1631 kfree(pci_seg); 1632 } 1633 } 1634 1635 static void __init free_iommu_one(struct amd_iommu *iommu) 1636 { 1637 free_cwwb_sem(iommu); 1638 free_command_buffer(iommu); 1639 free_event_buffer(iommu); 1640 free_ppr_log(iommu); 1641 free_ga_log(iommu); 1642 iommu_unmap_mmio_space(iommu); 1643 } 1644 1645 static void __init free_iommu_all(void) 1646 { 1647 struct amd_iommu *iommu, *next; 1648 1649 for_each_iommu_safe(iommu, next) { 1650 list_del(&iommu->list); 1651 free_iommu_one(iommu); 1652 kfree(iommu); 1653 } 1654 } 1655 1656 /* 1657 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) 1658 * Workaround: 1659 * BIOS should disable L2B micellaneous clock gating by setting 1660 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 1661 */ 1662 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 1663 { 1664 u32 value; 1665 1666 if ((boot_cpu_data.x86 != 0x15) || 1667 (boot_cpu_data.x86_model < 0x10) || 1668 (boot_cpu_data.x86_model > 0x1f)) 1669 return; 1670 1671 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1672 pci_read_config_dword(iommu->dev, 0xf4, &value); 1673 1674 if (value & BIT(2)) 1675 return; 1676 1677 /* Select NB indirect register 0x90 and enable writing */ 1678 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); 1679 1680 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); 1681 pci_info(iommu->dev, "Applying erratum 746 workaround\n"); 1682 1683 /* Clear the enable writing bit */ 1684 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1685 } 1686 1687 /* 1688 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) 1689 * Workaround: 1690 * BIOS should enable ATS write permission check by setting 1691 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b 1692 */ 1693 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) 1694 { 1695 u32 value; 1696 1697 if ((boot_cpu_data.x86 != 0x15) || 1698 (boot_cpu_data.x86_model < 0x30) || 
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
				 struct acpi_table_header *ivrs_base)
{
	struct amd_iommu_pci_seg *pci_seg;

	pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
	if (pci_seg == NULL)
		return -ENOMEM;
	iommu->pci_seg = pci_seg;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * GAM also requires GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
		 * XT and GAM also require GA mode. Therefore, we need to
		 * check cmpxchg16b support before enabling them.
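		 * (The 128-bit IRTE must be updated atomically, which is
		 * what the cmpxchg16b instruction provides.)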
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	return init_iommu_from_acpi(iommu, h);
}

static int __init init_iommu_one_late(struct amd_iommu *iommu)
{
	int ret;

	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	iommu->pci_seg->rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks in the IVRS table and
 * returns the highest supported IVHD type found for the IOMMU described
 * by the first block.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
		(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	/* Phase 1: Process all IVHD blocks */
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
				    "flags: %01x info %04x\n",
				    h->pci_seg, PCI_BUS_NUM(h->devid),
				    PCI_SLOT(h->devid), PCI_FUNC(h->devid),
				    h->cap_ptr, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h, table);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	/* Phase 2 : Early feature
support check */ 1913 get_global_efr(); 1914 1915 /* Phase 3 : Enabling IOMMU features */ 1916 for_each_iommu(iommu) { 1917 ret = init_iommu_one_late(iommu); 1918 if (ret) 1919 return ret; 1920 } 1921 1922 return 0; 1923 } 1924 1925 static void init_iommu_perf_ctr(struct amd_iommu *iommu) 1926 { 1927 u64 val; 1928 struct pci_dev *pdev = iommu->dev; 1929 1930 if (!iommu_feature(iommu, FEATURE_PC)) 1931 return; 1932 1933 amd_iommu_pc_present = true; 1934 1935 pci_info(pdev, "IOMMU performance counters supported\n"); 1936 1937 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); 1938 iommu->max_banks = (u8) ((val >> 12) & 0x3f); 1939 iommu->max_counters = (u8) ((val >> 7) & 0xf); 1940 1941 return; 1942 } 1943 1944 static ssize_t amd_iommu_show_cap(struct device *dev, 1945 struct device_attribute *attr, 1946 char *buf) 1947 { 1948 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 1949 return sprintf(buf, "%x\n", iommu->cap); 1950 } 1951 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); 1952 1953 static ssize_t amd_iommu_show_features(struct device *dev, 1954 struct device_attribute *attr, 1955 char *buf) 1956 { 1957 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 1958 return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features); 1959 } 1960 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); 1961 1962 static struct attribute *amd_iommu_attrs[] = { 1963 &dev_attr_cap.attr, 1964 &dev_attr_features.attr, 1965 NULL, 1966 }; 1967 1968 static struct attribute_group amd_iommu_group = { 1969 .name = "amd-iommu", 1970 .attrs = amd_iommu_attrs, 1971 }; 1972 1973 static const struct attribute_group *amd_iommu_groups[] = { 1974 &amd_iommu_group, 1975 NULL, 1976 }; 1977 1978 /* 1979 * Note: IVHD 0x11 and 0x40 also contains exact copy 1980 * of the IOMMU Extended Feature Register [MMIO Offset 0030h]. 1981 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init). 1982 */ 1983 static void __init late_iommu_features_init(struct amd_iommu *iommu) 1984 { 1985 u64 features, features2; 1986 1987 if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) 1988 return; 1989 1990 /* read extended feature bits */ 1991 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); 1992 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); 1993 1994 if (!iommu->features) { 1995 iommu->features = features; 1996 iommu->features2 = features2; 1997 return; 1998 } 1999 2000 /* 2001 * Sanity check and warn if EFR values from 2002 * IVHD and MMIO conflict. 2003 */ 2004 if (features != iommu->features || 2005 features2 != iommu->features2) { 2006 pr_warn(FW_WARN 2007 "EFR mismatch. 
Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n", 2008 features, iommu->features, 2009 features2, iommu->features2); 2010 } 2011 } 2012 2013 static int __init iommu_init_pci(struct amd_iommu *iommu) 2014 { 2015 int cap_ptr = iommu->cap_ptr; 2016 int ret; 2017 2018 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2019 PCI_BUS_NUM(iommu->devid), 2020 iommu->devid & 0xff); 2021 if (!iommu->dev) 2022 return -ENODEV; 2023 2024 /* Prevent binding other PCI device drivers to IOMMU devices */ 2025 iommu->dev->match_driver = false; 2026 2027 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 2028 &iommu->cap); 2029 2030 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) 2031 amd_iommu_iotlb_sup = false; 2032 2033 late_iommu_features_init(iommu); 2034 2035 if (iommu_feature(iommu, FEATURE_GT)) { 2036 int glxval; 2037 u32 max_pasid; 2038 u64 pasmax; 2039 2040 pasmax = iommu->features & FEATURE_PASID_MASK; 2041 pasmax >>= FEATURE_PASID_SHIFT; 2042 max_pasid = (1 << (pasmax + 1)) - 1; 2043 2044 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); 2045 2046 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); 2047 2048 glxval = iommu->features & FEATURE_GLXVAL_MASK; 2049 glxval >>= FEATURE_GLXVAL_SHIFT; 2050 2051 if (amd_iommu_max_glx_val == -1) 2052 amd_iommu_max_glx_val = glxval; 2053 else 2054 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); 2055 } 2056 2057 if (iommu_feature(iommu, FEATURE_GT) && 2058 iommu_feature(iommu, FEATURE_PPR)) { 2059 iommu->is_iommu_v2 = true; 2060 amd_iommu_v2_present = true; 2061 } 2062 2063 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) 2064 return -ENOMEM; 2065 2066 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { 2067 pr_info("Using strict mode due to virtualization\n"); 2068 iommu_set_dma_strict(); 2069 amd_iommu_np_cache = true; 2070 } 2071 2072 init_iommu_perf_ctr(iommu); 2073 2074 if (is_rd890_iommu(iommu->dev)) { 2075 int i, j; 2076 2077 iommu->root_pdev = 2078 pci_get_domain_bus_and_slot(iommu->pci_seg->id, 2079 iommu->dev->bus->number, 2080 PCI_DEVFN(0, 0)); 2081 2082 /* 2083 * Some rd890 systems may not be fully reconfigured by the 2084 * BIOS, so it's necessary for us to store this information so 2085 * it can be reprogrammed on resume 2086 */ 2087 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, 2088 &iommu->stored_addr_lo); 2089 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, 2090 &iommu->stored_addr_hi); 2091 2092 /* Low bit locks writes to configuration space */ 2093 iommu->stored_addr_lo &= ~1; 2094 2095 for (i = 0; i < 6; i++) 2096 for (j = 0; j < 0x12; j++) 2097 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); 2098 2099 for (i = 0; i < 0x83; i++) 2100 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 2101 } 2102 2103 amd_iommu_erratum_746_workaround(iommu); 2104 amd_iommu_ats_write_check_workaround(iommu); 2105 2106 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, 2107 amd_iommu_groups, "ivhd%d", iommu->index); 2108 if (ret) 2109 return ret; 2110 2111 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); 2112 2113 return pci_enable_device(iommu->dev); 2114 } 2115 2116 static void print_iommu_info(void) 2117 { 2118 static const char * const feat_str[] = { 2119 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", 2120 "IA", "GA", "HE", "PC" 2121 }; 2122 struct amd_iommu *iommu; 2123 2124 for_each_iommu(iommu) { 2125 struct pci_dev *pdev = iommu->dev; 2126 int i; 2127 2128 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr); 2129 2130 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { 2131 
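			/*
			 * feat_str[] above names the low EFR bits in order
			 * (bit 0 = PreF ... bit 9 = PC); iommu_feature()
			 * tests each bit in iommu->features before printing it.
			 */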
pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2); 2132 2133 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { 2134 if (iommu_feature(iommu, (1ULL << i))) 2135 pr_cont(" %s", feat_str[i]); 2136 } 2137 2138 if (iommu->features & FEATURE_GAM_VAPIC) 2139 pr_cont(" GA_vAPIC"); 2140 2141 if (iommu->features & FEATURE_SNP) 2142 pr_cont(" SNP"); 2143 2144 pr_cont("\n"); 2145 } 2146 } 2147 if (irq_remapping_enabled) { 2148 pr_info("Interrupt remapping enabled\n"); 2149 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2150 pr_info("X2APIC enabled\n"); 2151 } 2152 } 2153 2154 static int __init amd_iommu_init_pci(void) 2155 { 2156 struct amd_iommu *iommu; 2157 struct amd_iommu_pci_seg *pci_seg; 2158 int ret; 2159 2160 for_each_iommu(iommu) { 2161 ret = iommu_init_pci(iommu); 2162 if (ret) { 2163 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", 2164 iommu->index, ret); 2165 goto out; 2166 } 2167 /* Need to setup range after PCI init */ 2168 iommu_set_cwwb_range(iommu); 2169 } 2170 2171 /* 2172 * Order is important here to make sure any unity map requirements are 2173 * fulfilled. The unity mappings are created and written to the device 2174 * table during the amd_iommu_init_api() call. 2175 * 2176 * After that we call init_device_table_dma() to make sure any 2177 * uninitialized DTE will block DMA, and in the end we flush the caches 2178 * of all IOMMUs to make sure the changes to the device table are 2179 * active. 2180 */ 2181 ret = amd_iommu_init_api(); 2182 if (ret) { 2183 pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n", 2184 ret); 2185 goto out; 2186 } 2187 2188 for_each_pci_segment(pci_seg) 2189 init_device_table_dma(pci_seg); 2190 2191 for_each_iommu(iommu) 2192 iommu_flush_all_caches(iommu); 2193 2194 print_iommu_info(); 2195 2196 out: 2197 return ret; 2198 } 2199 2200 /**************************************************************************** 2201 * 2202 * The following functions initialize the MSI interrupts for all IOMMUs 2203 * in the system. It's a bit challenging because there could be multiple 2204 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 2205 * pci_dev. 
2206 * 2207 ****************************************************************************/ 2208 2209 static int iommu_setup_msi(struct amd_iommu *iommu) 2210 { 2211 int r; 2212 2213 r = pci_enable_msi(iommu->dev); 2214 if (r) 2215 return r; 2216 2217 r = request_threaded_irq(iommu->dev->irq, 2218 amd_iommu_int_handler, 2219 amd_iommu_int_thread, 2220 0, "AMD-Vi", 2221 iommu); 2222 2223 if (r) { 2224 pci_disable_msi(iommu->dev); 2225 return r; 2226 } 2227 2228 return 0; 2229 } 2230 2231 union intcapxt { 2232 u64 capxt; 2233 struct { 2234 u64 reserved_0 : 2, 2235 dest_mode_logical : 1, 2236 reserved_1 : 5, 2237 destid_0_23 : 24, 2238 vector : 8, 2239 reserved_2 : 16, 2240 destid_24_31 : 8; 2241 }; 2242 } __attribute__ ((packed)); 2243 2244 2245 static struct irq_chip intcapxt_controller; 2246 2247 static int intcapxt_irqdomain_activate(struct irq_domain *domain, 2248 struct irq_data *irqd, bool reserve) 2249 { 2250 return 0; 2251 } 2252 2253 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain, 2254 struct irq_data *irqd) 2255 { 2256 } 2257 2258 2259 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, 2260 unsigned int nr_irqs, void *arg) 2261 { 2262 struct irq_alloc_info *info = arg; 2263 int i, ret; 2264 2265 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI) 2266 return -EINVAL; 2267 2268 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); 2269 if (ret < 0) 2270 return ret; 2271 2272 for (i = virq; i < virq + nr_irqs; i++) { 2273 struct irq_data *irqd = irq_domain_get_irq_data(domain, i); 2274 2275 irqd->chip = &intcapxt_controller; 2276 irqd->chip_data = info->data; 2277 __irq_set_handler(i, handle_edge_irq, 0, "edge"); 2278 } 2279 2280 return ret; 2281 } 2282 2283 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq, 2284 unsigned int nr_irqs) 2285 { 2286 irq_domain_free_irqs_top(domain, virq, nr_irqs); 2287 } 2288 2289 2290 static void intcapxt_unmask_irq(struct irq_data *irqd) 2291 { 2292 struct amd_iommu *iommu = irqd->chip_data; 2293 struct irq_cfg *cfg = irqd_cfg(irqd); 2294 union intcapxt xt; 2295 2296 xt.capxt = 0ULL; 2297 xt.dest_mode_logical = apic->dest_mode_logical; 2298 xt.vector = cfg->vector; 2299 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); 2300 xt.destid_24_31 = cfg->dest_apicid >> 24; 2301 2302 /** 2303 * Current IOMMU implementation uses the same IRQ for all 2304 * 3 IOMMU interrupts. 2305 */ 2306 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); 2307 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); 2308 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); 2309 } 2310 2311 static void intcapxt_mask_irq(struct irq_data *irqd) 2312 { 2313 struct amd_iommu *iommu = irqd->chip_data; 2314 2315 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); 2316 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); 2317 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); 2318 } 2319 2320 2321 static int intcapxt_set_affinity(struct irq_data *irqd, 2322 const struct cpumask *mask, bool force) 2323 { 2324 struct irq_data *parent = irqd->parent_data; 2325 int ret; 2326 2327 ret = parent->chip->irq_set_affinity(parent, mask, force); 2328 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) 2329 return ret; 2330 return 0; 2331 } 2332 2333 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on) 2334 { 2335 return on ? 
-EOPNOTSUPP : 0; 2336 } 2337 2338 static struct irq_chip intcapxt_controller = { 2339 .name = "IOMMU-MSI", 2340 .irq_unmask = intcapxt_unmask_irq, 2341 .irq_mask = intcapxt_mask_irq, 2342 .irq_ack = irq_chip_ack_parent, 2343 .irq_retrigger = irq_chip_retrigger_hierarchy, 2344 .irq_set_affinity = intcapxt_set_affinity, 2345 .irq_set_wake = intcapxt_set_wake, 2346 .flags = IRQCHIP_MASK_ON_SUSPEND, 2347 }; 2348 2349 static const struct irq_domain_ops intcapxt_domain_ops = { 2350 .alloc = intcapxt_irqdomain_alloc, 2351 .free = intcapxt_irqdomain_free, 2352 .activate = intcapxt_irqdomain_activate, 2353 .deactivate = intcapxt_irqdomain_deactivate, 2354 }; 2355 2356 2357 static struct irq_domain *iommu_irqdomain; 2358 2359 static struct irq_domain *iommu_get_irqdomain(void) 2360 { 2361 struct fwnode_handle *fn; 2362 2363 /* No need for locking here (yet) as the init is single-threaded */ 2364 if (iommu_irqdomain) 2365 return iommu_irqdomain; 2366 2367 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI"); 2368 if (!fn) 2369 return NULL; 2370 2371 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, 2372 fn, &intcapxt_domain_ops, 2373 NULL); 2374 if (!iommu_irqdomain) 2375 irq_domain_free_fwnode(fn); 2376 2377 return iommu_irqdomain; 2378 } 2379 2380 static int iommu_setup_intcapxt(struct amd_iommu *iommu) 2381 { 2382 struct irq_domain *domain; 2383 struct irq_alloc_info info; 2384 int irq, ret; 2385 2386 domain = iommu_get_irqdomain(); 2387 if (!domain) 2388 return -ENXIO; 2389 2390 init_irq_alloc_info(&info, NULL); 2391 info.type = X86_IRQ_ALLOC_TYPE_AMDVI; 2392 info.data = iommu; 2393 2394 irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); 2395 if (irq < 0) { 2396 irq_domain_remove(domain); 2397 return irq; 2398 } 2399 2400 ret = request_threaded_irq(irq, amd_iommu_int_handler, 2401 amd_iommu_int_thread, 0, "AMD-Vi", iommu); 2402 if (ret) { 2403 irq_domain_free_irqs(irq, 1); 2404 irq_domain_remove(domain); 2405 return ret; 2406 } 2407 2408 return 0; 2409 } 2410 2411 static int iommu_init_irq(struct amd_iommu *iommu) 2412 { 2413 int ret; 2414 2415 if (iommu->int_enabled) 2416 goto enable_faults; 2417 2418 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2419 ret = iommu_setup_intcapxt(iommu); 2420 else if (iommu->dev->msi_cap) 2421 ret = iommu_setup_msi(iommu); 2422 else 2423 ret = -ENODEV; 2424 2425 if (ret) 2426 return ret; 2427 2428 iommu->int_enabled = true; 2429 enable_faults: 2430 2431 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 2432 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); 2433 2434 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); 2435 2436 if (iommu->ppr_log != NULL) 2437 iommu_feature_enable(iommu, CONTROL_PPRINT_EN); 2438 return 0; 2439 } 2440 2441 /**************************************************************************** 2442 * 2443 * The next functions belong to the third pass of parsing the ACPI 2444 * table. In this last pass the memory mapping requirements are 2445 * gathered (like exclusion and unity mapping ranges). 
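 * IVMD entries with the unity-map or exclusion-range flag set are turned
 * into struct unity_map_entry objects and added to the owning PCI segment's
 * unity_map list (see init_unity_map_range() below).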
2446 * 2447 ****************************************************************************/ 2448 2449 static void __init free_unity_maps(void) 2450 { 2451 struct unity_map_entry *entry, *next; 2452 struct amd_iommu_pci_seg *p, *pci_seg; 2453 2454 for_each_pci_segment_safe(pci_seg, p) { 2455 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { 2456 list_del(&entry->list); 2457 kfree(entry); 2458 } 2459 } 2460 } 2461 2462 /* called for unity map ACPI definition */ 2463 static int __init init_unity_map_range(struct ivmd_header *m, 2464 struct acpi_table_header *ivrs_base) 2465 { 2466 struct unity_map_entry *e = NULL; 2467 struct amd_iommu_pci_seg *pci_seg; 2468 char *s; 2469 2470 pci_seg = get_pci_segment(m->pci_seg, ivrs_base); 2471 if (pci_seg == NULL) 2472 return -ENOMEM; 2473 2474 e = kzalloc(sizeof(*e), GFP_KERNEL); 2475 if (e == NULL) 2476 return -ENOMEM; 2477 2478 switch (m->type) { 2479 default: 2480 kfree(e); 2481 return 0; 2482 case ACPI_IVMD_TYPE: 2483 s = "IVMD_TYPEi\t\t\t"; 2484 e->devid_start = e->devid_end = m->devid; 2485 break; 2486 case ACPI_IVMD_TYPE_ALL: 2487 s = "IVMD_TYPE_ALL\t\t"; 2488 e->devid_start = 0; 2489 e->devid_end = pci_seg->last_bdf; 2490 break; 2491 case ACPI_IVMD_TYPE_RANGE: 2492 s = "IVMD_TYPE_RANGE\t\t"; 2493 e->devid_start = m->devid; 2494 e->devid_end = m->aux; 2495 break; 2496 } 2497 e->address_start = PAGE_ALIGN(m->range_start); 2498 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); 2499 e->prot = m->flags >> 1; 2500 2501 /* 2502 * Treat per-device exclusion ranges as r/w unity-mapped regions 2503 * since some buggy BIOSes might lead to the overwritten exclusion 2504 * range (exclusion_start and exclusion_length members). This 2505 * happens when there are multiple exclusion ranges (IVMD entries) 2506 * defined in ACPI table. 
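	 * For such entries the IR and IW protection bits are therefore
	 * forced below, making the range readable and writable.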
2507 */ 2508 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2509 e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; 2510 2511 DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: " 2512 "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx" 2513 " flags: %x\n", s, m->pci_seg, 2514 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 2515 PCI_FUNC(e->devid_start), m->pci_seg, 2516 PCI_BUS_NUM(e->devid_end), 2517 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 2518 e->address_start, e->address_end, m->flags); 2519 2520 list_add_tail(&e->list, &pci_seg->unity_map); 2521 2522 return 0; 2523 } 2524 2525 /* iterates over all memory definitions we find in the ACPI table */ 2526 static int __init init_memory_definitions(struct acpi_table_header *table) 2527 { 2528 u8 *p = (u8 *)table, *end = (u8 *)table; 2529 struct ivmd_header *m; 2530 2531 end += table->length; 2532 p += IVRS_HEADER_LENGTH; 2533 2534 while (p < end) { 2535 m = (struct ivmd_header *)p; 2536 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) 2537 init_unity_map_range(m, table); 2538 2539 p += m->length; 2540 } 2541 2542 return 0; 2543 } 2544 2545 /* 2546 * Init the device table to not allow DMA access for devices 2547 */ 2548 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2549 { 2550 u32 devid; 2551 struct dev_table_entry *dev_table = pci_seg->dev_table; 2552 2553 if (dev_table == NULL) 2554 return; 2555 2556 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2557 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID); 2558 if (!amd_iommu_snp_en) 2559 __set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION); 2560 } 2561 } 2562 2563 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) 2564 { 2565 u32 devid; 2566 struct dev_table_entry *dev_table = pci_seg->dev_table; 2567 2568 if (dev_table == NULL) 2569 return; 2570 2571 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { 2572 dev_table[devid].data[0] = 0ULL; 2573 dev_table[devid].data[1] = 0ULL; 2574 } 2575 } 2576 2577 static void init_device_table(void) 2578 { 2579 struct amd_iommu_pci_seg *pci_seg; 2580 u32 devid; 2581 2582 if (!amd_iommu_irq_remap) 2583 return; 2584 2585 for_each_pci_segment(pci_seg) { 2586 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) 2587 __set_dev_entry_bit(pci_seg->dev_table, 2588 devid, DEV_ENTRY_IRQ_TBL_EN); 2589 } 2590 } 2591 2592 static void iommu_init_flags(struct amd_iommu *iommu) 2593 { 2594 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 2595 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 2596 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 2597 2598 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 2599 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 2600 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 2601 2602 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 2603 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 2604 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 2605 2606 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
2607 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 2608 iommu_feature_disable(iommu, CONTROL_ISOC_EN); 2609 2610 /* 2611 * make IOMMU memory accesses cache coherent 2612 */ 2613 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 2614 2615 /* Set IOTLB invalidation timeout to 1s */ 2616 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); 2617 } 2618 2619 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) 2620 { 2621 int i, j; 2622 u32 ioc_feature_control; 2623 struct pci_dev *pdev = iommu->root_pdev; 2624 2625 /* RD890 BIOSes may not have completely reconfigured the iommu */ 2626 if (!is_rd890_iommu(iommu->dev) || !pdev) 2627 return; 2628 2629 /* 2630 * First, we need to ensure that the iommu is enabled. This is 2631 * controlled by a register in the northbridge 2632 */ 2633 2634 /* Select Northbridge indirect register 0x75 and enable writing */ 2635 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); 2636 pci_read_config_dword(pdev, 0x64, &ioc_feature_control); 2637 2638 /* Enable the iommu */ 2639 if (!(ioc_feature_control & 0x1)) 2640 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 2641 2642 /* Restore the iommu BAR */ 2643 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 2644 iommu->stored_addr_lo); 2645 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, 2646 iommu->stored_addr_hi); 2647 2648 /* Restore the l1 indirect regs for each of the 6 l1s */ 2649 for (i = 0; i < 6; i++) 2650 for (j = 0; j < 0x12; j++) 2651 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); 2652 2653 /* Restore the l2 indirect regs */ 2654 for (i = 0; i < 0x83; i++) 2655 iommu_write_l2(iommu, i, iommu->stored_l2[i]); 2656 2657 /* Lock PCI setup registers */ 2658 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 2659 iommu->stored_addr_lo | 1); 2660 } 2661 2662 static void iommu_enable_ga(struct amd_iommu *iommu) 2663 { 2664 #ifdef CONFIG_IRQ_REMAP 2665 switch (amd_iommu_guest_ir) { 2666 case AMD_IOMMU_GUEST_IR_VAPIC: 2667 case AMD_IOMMU_GUEST_IR_LEGACY_GA: 2668 iommu_feature_enable(iommu, CONTROL_GA_EN); 2669 iommu->irte_ops = &irte_128_ops; 2670 break; 2671 default: 2672 iommu->irte_ops = &irte_32_ops; 2673 break; 2674 } 2675 #endif 2676 } 2677 2678 static void early_enable_iommu(struct amd_iommu *iommu) 2679 { 2680 iommu_disable(iommu); 2681 iommu_init_flags(iommu); 2682 iommu_set_device_table(iommu); 2683 iommu_enable_command_buffer(iommu); 2684 iommu_enable_event_buffer(iommu); 2685 iommu_set_exclusion_range(iommu); 2686 iommu_enable_ga(iommu); 2687 iommu_enable_xt(iommu); 2688 iommu_enable(iommu); 2689 iommu_flush_all_caches(iommu); 2690 } 2691 2692 /* 2693 * This function finally enables all IOMMUs found in the system after 2694 * they have been initialized. 2695 * 2696 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy 2697 * the old content of device table entries. Not this case or copy failed, 2698 * just continue as normal kernel does. 2699 */ 2700 static void early_enable_iommus(void) 2701 { 2702 struct amd_iommu *iommu; 2703 struct amd_iommu_pci_seg *pci_seg; 2704 2705 if (!copy_device_table()) { 2706 /* 2707 * If come here because of failure in copying device table from old 2708 * kernel with all IOMMUs enabled, print error message and try to 2709 * free allocated old_dev_tbl_cpy. 
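		 * Each IOMMU is then enabled from scratch via
		 * early_enable_iommu() in the loop below.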
2710 */ 2711 if (amd_iommu_pre_enabled) 2712 pr_err("Failed to copy DEV table from previous kernel.\n"); 2713 2714 for_each_pci_segment(pci_seg) { 2715 if (pci_seg->old_dev_tbl_cpy != NULL) { 2716 free_pages((unsigned long)pci_seg->old_dev_tbl_cpy, 2717 get_order(pci_seg->dev_table_size)); 2718 pci_seg->old_dev_tbl_cpy = NULL; 2719 } 2720 } 2721 2722 for_each_iommu(iommu) { 2723 clear_translation_pre_enabled(iommu); 2724 early_enable_iommu(iommu); 2725 } 2726 } else { 2727 pr_info("Copied DEV table from previous kernel.\n"); 2728 2729 for_each_pci_segment(pci_seg) { 2730 free_pages((unsigned long)pci_seg->dev_table, 2731 get_order(pci_seg->dev_table_size)); 2732 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; 2733 } 2734 2735 for_each_iommu(iommu) { 2736 iommu_disable_command_buffer(iommu); 2737 iommu_disable_event_buffer(iommu); 2738 iommu_enable_command_buffer(iommu); 2739 iommu_enable_event_buffer(iommu); 2740 iommu_enable_ga(iommu); 2741 iommu_enable_xt(iommu); 2742 iommu_set_device_table(iommu); 2743 iommu_flush_all_caches(iommu); 2744 } 2745 } 2746 } 2747 2748 static void enable_iommus_v2(void) 2749 { 2750 struct amd_iommu *iommu; 2751 2752 for_each_iommu(iommu) { 2753 iommu_enable_ppr_log(iommu); 2754 iommu_enable_gt(iommu); 2755 } 2756 } 2757 2758 static void enable_iommus_vapic(void) 2759 { 2760 #ifdef CONFIG_IRQ_REMAP 2761 u32 status, i; 2762 struct amd_iommu *iommu; 2763 2764 for_each_iommu(iommu) { 2765 /* 2766 * Disable GALog if already running. It could have been enabled 2767 * in the previous boot before kdump. 2768 */ 2769 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 2770 if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) 2771 continue; 2772 2773 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 2774 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 2775 2776 /* 2777 * Need to set and poll check the GALOGRun bit to zero before 2778 * we can set/ modify GA Log registers safely. 
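		 * The loop below retries up to LOOP_TIMEOUT times, waiting
		 * 10us between status reads.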
2779 */ 2780 for (i = 0; i < LOOP_TIMEOUT; ++i) { 2781 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 2782 if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) 2783 break; 2784 udelay(10); 2785 } 2786 2787 if (WARN_ON(i >= LOOP_TIMEOUT)) 2788 return; 2789 } 2790 2791 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && 2792 !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) { 2793 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2794 return; 2795 } 2796 2797 if (amd_iommu_snp_en && 2798 !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) { 2799 pr_warn("Force to disable Virtual APIC due to SNP\n"); 2800 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2801 return; 2802 } 2803 2804 /* Enabling GAM and SNPAVIC support */ 2805 for_each_iommu(iommu) { 2806 if (iommu_init_ga_log(iommu) || 2807 iommu_ga_log_enable(iommu)) 2808 return; 2809 2810 iommu_feature_enable(iommu, CONTROL_GAM_EN); 2811 if (amd_iommu_snp_en) 2812 iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); 2813 } 2814 2815 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); 2816 pr_info("Virtual APIC enabled\n"); 2817 #endif 2818 } 2819 2820 static void enable_iommus(void) 2821 { 2822 early_enable_iommus(); 2823 enable_iommus_vapic(); 2824 enable_iommus_v2(); 2825 } 2826 2827 static void disable_iommus(void) 2828 { 2829 struct amd_iommu *iommu; 2830 2831 for_each_iommu(iommu) 2832 iommu_disable(iommu); 2833 2834 #ifdef CONFIG_IRQ_REMAP 2835 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 2836 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); 2837 #endif 2838 } 2839 2840 /* 2841 * Suspend/Resume support 2842 * disable suspend until real resume implemented 2843 */ 2844 2845 static void amd_iommu_resume(void) 2846 { 2847 struct amd_iommu *iommu; 2848 2849 for_each_iommu(iommu) 2850 iommu_apply_resume_quirks(iommu); 2851 2852 /* re-load the hardware */ 2853 enable_iommus(); 2854 2855 amd_iommu_enable_interrupts(); 2856 } 2857 2858 static int amd_iommu_suspend(void) 2859 { 2860 /* disable IOMMUs to go out of the way for BIOS */ 2861 disable_iommus(); 2862 2863 return 0; 2864 } 2865 2866 static struct syscore_ops amd_iommu_syscore_ops = { 2867 .suspend = amd_iommu_suspend, 2868 .resume = amd_iommu_resume, 2869 }; 2870 2871 static void __init free_iommu_resources(void) 2872 { 2873 kmem_cache_destroy(amd_iommu_irq_cache); 2874 amd_iommu_irq_cache = NULL; 2875 2876 free_iommu_all(); 2877 free_pci_segments(); 2878 } 2879 2880 /* SB IOAPIC is always on this device in AMD systems */ 2881 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) 2882 2883 static bool __init check_ioapic_information(void) 2884 { 2885 const char *fw_bug = FW_BUG; 2886 bool ret, has_sb_ioapic; 2887 int idx; 2888 2889 has_sb_ioapic = false; 2890 ret = false; 2891 2892 /* 2893 * If we have map overrides on the kernel command line the 2894 * messages in this function might not describe firmware bugs 2895 * anymore - so be careful 2896 */ 2897 if (cmdline_maps) 2898 fw_bug = ""; 2899 2900 for (idx = 0; idx < nr_ioapics; idx++) { 2901 int devid, id = mpc_ioapic_id(idx); 2902 2903 devid = get_ioapic_devid(id); 2904 if (devid < 0) { 2905 pr_err("%s: IOAPIC[%d] not in IVRS table\n", 2906 fw_bug, id); 2907 ret = false; 2908 } else if (devid == IOAPIC_SB_DEVID) { 2909 has_sb_ioapic = true; 2910 ret = true; 2911 } 2912 } 2913 2914 if (!has_sb_ioapic) { 2915 /* 2916 * We expect the SB IOAPIC to be listed in the IVRS 2917 * table. The system timer is connected to the SB IOAPIC 2918 * and if we don't have it in the list the system will 2919 * panic at boot time. 
This situation usually happens 2920 * when the BIOS is buggy and provides us the wrong 2921 * device id for the IOAPIC in the system. 2922 */ 2923 pr_err("%s: No southbridge IOAPIC found\n", fw_bug); 2924 } 2925 2926 if (!ret) 2927 pr_err("Disabling interrupt remapping\n"); 2928 2929 return ret; 2930 } 2931 2932 static void __init free_dma_resources(void) 2933 { 2934 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 2935 get_order(MAX_DOMAIN_ID/8)); 2936 amd_iommu_pd_alloc_bitmap = NULL; 2937 2938 free_unity_maps(); 2939 } 2940 2941 static void __init ivinfo_init(void *ivrs) 2942 { 2943 amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET)); 2944 } 2945 2946 /* 2947 * This is the hardware init function for AMD IOMMU in the system. 2948 * This function is called either from amd_iommu_init or from the interrupt 2949 * remapping setup code. 2950 * 2951 * This function basically parses the ACPI table for AMD IOMMU (IVRS) 2952 * four times: 2953 * 2954 * 1 pass) Discover the most comprehensive IVHD type to use. 2955 * 2956 * 2 pass) Find the highest PCI device id the driver has to handle. 2957 * Upon this information the size of the data structures is 2958 * determined that needs to be allocated. 2959 * 2960 * 3 pass) Initialize the data structures just allocated with the 2961 * information in the ACPI table about available AMD IOMMUs 2962 * in the system. It also maps the PCI devices in the 2963 * system to specific IOMMUs 2964 * 2965 * 4 pass) After the basic data structures are allocated and 2966 * initialized we update them with information about memory 2967 * remapping requirements parsed out of the ACPI table in 2968 * this last pass. 2969 * 2970 * After everything is set up the IOMMUs are enabled and the necessary 2971 * hotplug and suspend notifiers are registered. 
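 *
 * Passes 1, 3 and 4 correspond to get_highest_supported_ivhd_type(),
 * init_iommu_all() and init_memory_definitions(), all called from
 * early_amd_iommu_init() below.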
2972 */ 2973 static int __init early_amd_iommu_init(void) 2974 { 2975 struct acpi_table_header *ivrs_base; 2976 int remap_cache_sz, ret; 2977 acpi_status status; 2978 2979 if (!amd_iommu_detected) 2980 return -ENODEV; 2981 2982 status = acpi_get_table("IVRS", 0, &ivrs_base); 2983 if (status == AE_NOT_FOUND) 2984 return -ENODEV; 2985 else if (ACPI_FAILURE(status)) { 2986 const char *err = acpi_format_exception(status); 2987 pr_err("IVRS table error: %s\n", err); 2988 return -EINVAL; 2989 } 2990 2991 /* 2992 * Validate checksum here so we don't need to do it when 2993 * we actually parse the table 2994 */ 2995 ret = check_ivrs_checksum(ivrs_base); 2996 if (ret) 2997 goto out; 2998 2999 ivinfo_init(ivrs_base); 3000 3001 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); 3002 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); 3003 3004 /* Device table - directly used by all IOMMUs */ 3005 ret = -ENOMEM; 3006 3007 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( 3008 GFP_KERNEL | __GFP_ZERO, 3009 get_order(MAX_DOMAIN_ID/8)); 3010 if (amd_iommu_pd_alloc_bitmap == NULL) 3011 goto out; 3012 3013 /* 3014 * never allocate domain 0 because its used as the non-allocated and 3015 * error value placeholder 3016 */ 3017 __set_bit(0, amd_iommu_pd_alloc_bitmap); 3018 3019 /* 3020 * now the data structures are allocated and basically initialized 3021 * start the real acpi table scan 3022 */ 3023 ret = init_iommu_all(ivrs_base); 3024 if (ret) 3025 goto out; 3026 3027 /* Disable any previously enabled IOMMUs */ 3028 if (!is_kdump_kernel() || amd_iommu_disabled) 3029 disable_iommus(); 3030 3031 if (amd_iommu_irq_remap) 3032 amd_iommu_irq_remap = check_ioapic_information(); 3033 3034 if (amd_iommu_irq_remap) { 3035 struct amd_iommu_pci_seg *pci_seg; 3036 /* 3037 * Interrupt remapping enabled, create kmem_cache for the 3038 * remapping tables. 
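		 * The cache object size depends on the IRTE format: 128-bit
		 * entries when guest virtual APIC (GA) mode is used, 32-bit
		 * entries otherwise (see the remap_cache_sz computation below).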
3039 */ 3040 ret = -ENOMEM; 3041 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 3042 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); 3043 else 3044 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); 3045 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", 3046 remap_cache_sz, 3047 DTE_INTTAB_ALIGNMENT, 3048 0, NULL); 3049 if (!amd_iommu_irq_cache) 3050 goto out; 3051 3052 for_each_pci_segment(pci_seg) { 3053 if (alloc_irq_lookup_table(pci_seg)) 3054 goto out; 3055 } 3056 } 3057 3058 ret = init_memory_definitions(ivrs_base); 3059 if (ret) 3060 goto out; 3061 3062 /* init the device table */ 3063 init_device_table(); 3064 3065 out: 3066 /* Don't leak any ACPI memory */ 3067 acpi_put_table(ivrs_base); 3068 3069 return ret; 3070 } 3071 3072 static int amd_iommu_enable_interrupts(void) 3073 { 3074 struct amd_iommu *iommu; 3075 int ret = 0; 3076 3077 for_each_iommu(iommu) { 3078 ret = iommu_init_irq(iommu); 3079 if (ret) 3080 goto out; 3081 } 3082 3083 out: 3084 return ret; 3085 } 3086 3087 static bool __init detect_ivrs(void) 3088 { 3089 struct acpi_table_header *ivrs_base; 3090 acpi_status status; 3091 int i; 3092 3093 status = acpi_get_table("IVRS", 0, &ivrs_base); 3094 if (status == AE_NOT_FOUND) 3095 return false; 3096 else if (ACPI_FAILURE(status)) { 3097 const char *err = acpi_format_exception(status); 3098 pr_err("IVRS table error: %s\n", err); 3099 return false; 3100 } 3101 3102 acpi_put_table(ivrs_base); 3103 3104 if (amd_iommu_force_enable) 3105 goto out; 3106 3107 /* Don't use IOMMU if there is Stoney Ridge graphics */ 3108 for (i = 0; i < 32; i++) { 3109 u32 pci_id; 3110 3111 pci_id = read_pci_config(0, i, 0, 0); 3112 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) { 3113 pr_info("Disable IOMMU on Stoney Ridge\n"); 3114 return false; 3115 } 3116 } 3117 3118 out: 3119 /* Make sure ACS will be enabled during PCI probe */ 3120 pci_request_acs(); 3121 3122 return true; 3123 } 3124 3125 /**************************************************************************** 3126 * 3127 * AMD IOMMU Initialization State Machine 3128 * 3129 ****************************************************************************/ 3130 3131 static int __init state_next(void) 3132 { 3133 int ret = 0; 3134 3135 switch (init_state) { 3136 case IOMMU_START_STATE: 3137 if (!detect_ivrs()) { 3138 init_state = IOMMU_NOT_FOUND; 3139 ret = -ENODEV; 3140 } else { 3141 init_state = IOMMU_IVRS_DETECTED; 3142 } 3143 break; 3144 case IOMMU_IVRS_DETECTED: 3145 if (amd_iommu_disabled) { 3146 init_state = IOMMU_CMDLINE_DISABLED; 3147 ret = -EINVAL; 3148 } else { 3149 ret = early_amd_iommu_init(); 3150 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 3151 } 3152 break; 3153 case IOMMU_ACPI_FINISHED: 3154 early_enable_iommus(); 3155 x86_platform.iommu_shutdown = disable_iommus; 3156 init_state = IOMMU_ENABLED; 3157 break; 3158 case IOMMU_ENABLED: 3159 register_syscore_ops(&amd_iommu_syscore_ops); 3160 ret = amd_iommu_init_pci(); 3161 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 3162 enable_iommus_vapic(); 3163 enable_iommus_v2(); 3164 break; 3165 case IOMMU_PCI_INIT: 3166 ret = amd_iommu_enable_interrupts(); 3167 init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 3168 break; 3169 case IOMMU_INTERRUPTS_EN: 3170 init_state = IOMMU_INITIALIZED; 3171 break; 3172 case IOMMU_INITIALIZED: 3173 /* Nothing to do */ 3174 break; 3175 case IOMMU_NOT_FOUND: 3176 case IOMMU_INIT_ERROR: 3177 case IOMMU_CMDLINE_DISABLED: 3178 /* Error states => do nothing */ 3179 ret = -EINVAL; 3180 break; 3181 default: 3182 /* Unknown state */ 3183 BUG(); 3184 } 3185 3186 if (ret) { 3187 free_dma_resources(); 3188 if (!irq_remapping_enabled) { 3189 disable_iommus(); 3190 free_iommu_resources(); 3191 } else { 3192 struct amd_iommu *iommu; 3193 struct amd_iommu_pci_seg *pci_seg; 3194 3195 for_each_pci_segment(pci_seg) 3196 uninit_device_table_dma(pci_seg); 3197 3198 for_each_iommu(iommu) 3199 iommu_flush_all_caches(iommu); 3200 } 3201 } 3202 return ret; 3203 } 3204 3205 static int __init iommu_go_to_state(enum iommu_init_state state) 3206 { 3207 int ret = -EINVAL; 3208 3209 while (init_state != state) { 3210 if (init_state == IOMMU_NOT_FOUND || 3211 init_state == IOMMU_INIT_ERROR || 3212 init_state == IOMMU_CMDLINE_DISABLED) 3213 break; 3214 ret = state_next(); 3215 } 3216 3217 return ret; 3218 } 3219 3220 #ifdef CONFIG_IRQ_REMAP 3221 int __init amd_iommu_prepare(void) 3222 { 3223 int ret; 3224 3225 amd_iommu_irq_remap = true; 3226 3227 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 3228 if (ret) { 3229 amd_iommu_irq_remap = false; 3230 return ret; 3231 } 3232 3233 return amd_iommu_irq_remap ? 0 : -ENODEV; 3234 } 3235 3236 int __init amd_iommu_enable(void) 3237 { 3238 int ret; 3239 3240 ret = iommu_go_to_state(IOMMU_ENABLED); 3241 if (ret) 3242 return ret; 3243 3244 irq_remapping_enabled = 1; 3245 return amd_iommu_xt_mode; 3246 } 3247 3248 void amd_iommu_disable(void) 3249 { 3250 amd_iommu_suspend(); 3251 } 3252 3253 int amd_iommu_reenable(int mode) 3254 { 3255 amd_iommu_resume(); 3256 3257 return 0; 3258 } 3259 3260 int __init amd_iommu_enable_faulting(void) 3261 { 3262 /* We enable MSI later when PCI is initialized */ 3263 return 0; 3264 } 3265 #endif 3266 3267 /* 3268 * This is the core init function for AMD IOMMU hardware in the system. 3269 * This function is called from the generic x86 DMA layer initialization 3270 * code. 3271 */ 3272 static int __init amd_iommu_init(void) 3273 { 3274 struct amd_iommu *iommu; 3275 int ret; 3276 3277 ret = iommu_go_to_state(IOMMU_INITIALIZED); 3278 #ifdef CONFIG_GART_IOMMU 3279 if (ret && list_empty(&amd_iommu_list)) { 3280 /* 3281 * We failed to initialize the AMD IOMMU - try fallback 3282 * to GART if possible. 3283 */ 3284 gart_iommu_init(); 3285 } 3286 #endif 3287 3288 for_each_iommu(iommu) 3289 amd_iommu_debugfs_setup(iommu); 3290 3291 return ret; 3292 } 3293 3294 static bool amd_iommu_sme_check(void) 3295 { 3296 if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || 3297 (boot_cpu_data.x86 != 0x17)) 3298 return true; 3299 3300 /* For Fam17h, a specific level of support is required */ 3301 if (boot_cpu_data.microcode >= 0x08001205) 3302 return true; 3303 3304 if ((boot_cpu_data.microcode >= 0x08001126) && 3305 (boot_cpu_data.microcode <= 0x080011ff)) 3306 return true; 3307 3308 pr_notice("IOMMU not currently supported when SME is active\n"); 3309 3310 return false; 3311 } 3312 3313 /**************************************************************************** 3314 * 3315 * Early detect code. This code runs at IOMMU detection time in the DMA 3316 * layer. 
It just looks if there is an IVRS ACPI table to detect AMD 3317 * IOMMUs 3318 * 3319 ****************************************************************************/ 3320 int __init amd_iommu_detect(void) 3321 { 3322 int ret; 3323 3324 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 3325 return -ENODEV; 3326 3327 if (!amd_iommu_sme_check()) 3328 return -ENODEV; 3329 3330 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 3331 if (ret) 3332 return ret; 3333 3334 amd_iommu_detected = true; 3335 iommu_detected = 1; 3336 x86_init.iommu.iommu_init = amd_iommu_init; 3337 3338 return 1; 3339 } 3340 3341 /**************************************************************************** 3342 * 3343 * Parsing functions for the AMD IOMMU specific kernel command line 3344 * options. 3345 * 3346 ****************************************************************************/ 3347 3348 static int __init parse_amd_iommu_dump(char *str) 3349 { 3350 amd_iommu_dump = true; 3351 3352 return 1; 3353 } 3354 3355 static int __init parse_amd_iommu_intr(char *str) 3356 { 3357 for (; *str; ++str) { 3358 if (strncmp(str, "legacy", 6) == 0) { 3359 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 3360 break; 3361 } 3362 if (strncmp(str, "vapic", 5) == 0) { 3363 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 3364 break; 3365 } 3366 } 3367 return 1; 3368 } 3369 3370 static int __init parse_amd_iommu_options(char *str) 3371 { 3372 for (; *str; ++str) { 3373 if (strncmp(str, "fullflush", 9) == 0) { 3374 pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); 3375 iommu_set_dma_strict(); 3376 } 3377 if (strncmp(str, "force_enable", 12) == 0) 3378 amd_iommu_force_enable = true; 3379 if (strncmp(str, "off", 3) == 0) 3380 amd_iommu_disabled = true; 3381 if (strncmp(str, "force_isolation", 15) == 0) 3382 amd_iommu_force_isolation = true; 3383 } 3384 3385 return 1; 3386 } 3387 3388 static int __init parse_ivrs_ioapic(char *str) 3389 { 3390 u32 seg = 0, bus, dev, fn; 3391 int ret, id, i; 3392 u32 devid; 3393 3394 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 3395 if (ret != 4) { 3396 ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn); 3397 if (ret != 5) { 3398 pr_err("Invalid command line: ivrs_ioapic%s\n", str); 3399 return 1; 3400 } 3401 } 3402 3403 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 3404 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 3405 str); 3406 return 1; 3407 } 3408 3409 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3410 3411 cmdline_maps = true; 3412 i = early_ioapic_map_size++; 3413 early_ioapic_map[i].id = id; 3414 early_ioapic_map[i].devid = devid; 3415 early_ioapic_map[i].cmd_line = true; 3416 3417 return 1; 3418 } 3419 3420 static int __init parse_ivrs_hpet(char *str) 3421 { 3422 u32 seg = 0, bus, dev, fn; 3423 int ret, id, i; 3424 u32 devid; 3425 3426 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 3427 if (ret != 4) { 3428 ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn); 3429 if (ret != 5) { 3430 pr_err("Invalid command line: ivrs_hpet%s\n", str); 3431 return 1; 3432 } 3433 } 3434 3435 if (early_hpet_map_size == EARLY_MAP_SIZE) { 3436 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", 3437 str); 3438 return 1; 3439 } 3440 3441 devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3442 3443 cmdline_maps = true; 3444 i = early_hpet_map_size++; 3445 early_hpet_map[i].id = id; 3446 early_hpet_map[i].devid = devid; 3447 early_hpet_map[i].cmd_line = true; 3448 3449 return 1; 3450 } 3451 3452 static int __init 
parse_ivrs_acpihid(char *str) 3453 { 3454 u32 seg = 0, bus, dev, fn; 3455 char *hid, *uid, *p; 3456 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; 3457 int ret, i; 3458 3459 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); 3460 if (ret != 4) { 3461 ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid); 3462 if (ret != 5) { 3463 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); 3464 return 1; 3465 } 3466 } 3467 3468 p = acpiid; 3469 hid = strsep(&p, ":"); 3470 uid = p; 3471 3472 if (!hid || !(*hid) || !uid) { 3473 pr_err("Invalid command line: hid or uid\n"); 3474 return 1; 3475 } 3476 3477 i = early_acpihid_map_size++; 3478 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 3479 memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); 3480 early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); 3481 early_acpihid_map[i].cmd_line = true; 3482 3483 return 1; 3484 } 3485 3486 __setup("amd_iommu_dump", parse_amd_iommu_dump); 3487 __setup("amd_iommu=", parse_amd_iommu_options); 3488 __setup("amd_iommu_intr=", parse_amd_iommu_intr); 3489 __setup("ivrs_ioapic", parse_ivrs_ioapic); 3490 __setup("ivrs_hpet", parse_ivrs_hpet); 3491 __setup("ivrs_acpihid", parse_ivrs_acpihid); 3492 3493 bool amd_iommu_v2_supported(void) 3494 { 3495 /* 3496 * Since DTE[Mode]=0 is prohibited on SNP-enabled system 3497 * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without 3498 * setting up IOMMUv1 page table. 3499 */ 3500 return amd_iommu_v2_present && !amd_iommu_snp_en; 3501 } 3502 EXPORT_SYMBOL(amd_iommu_v2_supported); 3503 3504 struct amd_iommu *get_amd_iommu(unsigned int idx) 3505 { 3506 unsigned int i = 0; 3507 struct amd_iommu *iommu; 3508 3509 for_each_iommu(iommu) 3510 if (i++ == idx) 3511 return iommu; 3512 return NULL; 3513 } 3514 3515 /**************************************************************************** 3516 * 3517 * IOMMU EFR Performance Counter support functionality. This code allows 3518 * access to the IOMMU PC functionality. 
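 * Counter registers are reached through the IOMMU MMIO aperture;
 * iommu_pc_get_set_reg() below validates the bank/counter/function offset
 * against the per-IOMMU maxima read in init_iommu_perf_ctr().
 *
 * A minimal usage sketch (the function offset 0 selecting the raw counter
 * register is an assumption here, not something defined in this file):
 *
 *	u64 val = 0;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
 *		pr_info("bank 0 counter 0: %llu\n", val);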
3519 * 3520 ****************************************************************************/ 3521 3522 u8 amd_iommu_pc_get_max_banks(unsigned int idx) 3523 { 3524 struct amd_iommu *iommu = get_amd_iommu(idx); 3525 3526 if (iommu) 3527 return iommu->max_banks; 3528 3529 return 0; 3530 } 3531 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); 3532 3533 bool amd_iommu_pc_supported(void) 3534 { 3535 return amd_iommu_pc_present; 3536 } 3537 EXPORT_SYMBOL(amd_iommu_pc_supported); 3538 3539 u8 amd_iommu_pc_get_max_counters(unsigned int idx) 3540 { 3541 struct amd_iommu *iommu = get_amd_iommu(idx); 3542 3543 if (iommu) 3544 return iommu->max_counters; 3545 3546 return 0; 3547 } 3548 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 3549 3550 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, 3551 u8 fxn, u64 *value, bool is_write) 3552 { 3553 u32 offset; 3554 u32 max_offset_lim; 3555 3556 /* Make sure the IOMMU PC resource is available */ 3557 if (!amd_iommu_pc_present) 3558 return -ENODEV; 3559 3560 /* Check for valid iommu and pc register indexing */ 3561 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) 3562 return -ENODEV; 3563 3564 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); 3565 3566 /* Limit the offset to the hw defined mmio region aperture */ 3567 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | 3568 (iommu->max_counters << 8) | 0x28); 3569 if ((offset < MMIO_CNTR_REG_OFFSET) || 3570 (offset > max_offset_lim)) 3571 return -EINVAL; 3572 3573 if (is_write) { 3574 u64 val = *value & GENMASK_ULL(47, 0); 3575 3576 writel((u32)val, iommu->mmio_base + offset); 3577 writel((val >> 32), iommu->mmio_base + offset + 4); 3578 } else { 3579 *value = readl(iommu->mmio_base + offset + 4); 3580 *value <<= 32; 3581 *value |= readl(iommu->mmio_base + offset); 3582 *value &= GENMASK_ULL(47, 0); 3583 } 3584 3585 return 0; 3586 } 3587 3588 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3589 { 3590 if (!iommu) 3591 return -EINVAL; 3592 3593 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); 3594 } 3595 3596 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3597 { 3598 if (!iommu) 3599 return -EINVAL; 3600 3601 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); 3602 } 3603 3604 #ifdef CONFIG_AMD_MEM_ENCRYPT 3605 int amd_iommu_snp_enable(void) 3606 { 3607 /* 3608 * The SNP support requires that IOMMU must be enabled, and is 3609 * not configured in the passthrough mode. 3610 */ 3611 if (no_iommu || iommu_default_passthrough()) { 3612 pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported"); 3613 return -EINVAL; 3614 } 3615 3616 /* 3617 * Prevent enabling SNP after IOMMU_ENABLED state because this process 3618 * affect how IOMMU driver sets up data structures and configures 3619 * IOMMU hardware. 3620 */ 3621 if (init_state > IOMMU_ENABLED) { 3622 pr_err("SNP: Too late to enable SNP for IOMMU.\n"); 3623 return -EINVAL; 3624 } 3625 3626 amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP); 3627 if (!amd_iommu_snp_en) 3628 return -EINVAL; 3629 3630 pr_info("SNP enabled\n"); 3631 3632 /* Enforce IOMMU v1 pagetable when SNP is enabled. */ 3633 if (amd_iommu_pgtable != AMD_IOMMU_V1) { 3634 pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n"); 3635 amd_iommu_pgtable = AMD_IOMMU_V1; 3636 } 3637 3638 return 0; 3639 } 3640 #endif 3641