// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48
#define IVHD_DEV_ACPI_HID               0xf0

#define UID_NOT_PRESENT                 0
#define UID_IS_INTEGER                  1
#define UID_IS_CHARACTER                2

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_IW                    0x04
#define IVMD_FLAG_IR                    0x02
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

#define LOOP_TIMEOUT	100000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

extern const struct iommu_ops amd_iommu_ops;

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entry structures.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr_attr;

	/* Following only valid on IVHD type 11h and 40h */
	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
	u64 res;
} __attribute__((packed));
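/*
 * Layout note (illustrative): with the packed attribute,
 * sizeof(struct ivhd_header) is 40 bytes. A legacy type 10h header covers
 * only the first 24 bytes (u8+u8+u16+u16+u16+u64+u16+u16+u32 = 24), while
 * the two trailing u64 fields (efr_reg, res) extend type 11h/40h headers
 * to 40 bytes. This matches the per-type sizes returned by
 * get_ivhd_header_size() further down.
 */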
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
	u32 hidh;
	u64 cid;
	u8 uidf;
	u8 uidl;
	u8 uid;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;
/*
 * Pointer to a device table to which the contents of the old device
 * table will be copied. It is only used by the kdump kernel.
 */
static struct dev_table_entry *old_dev_tbl_cpy;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;
EXPORT_SYMBOL(amd_iommu_rlookup_table);

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;
/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
	IOMMU_CMDLINE_DISABLED,
};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);

static bool amd_iommu_pre_enabled = true;

bool translation_pre_enabled(struct amd_iommu *iommu)
{
	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
}
EXPORT_SYMBOL(translation_pre_enabled);

static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{
	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct amd_iommu *iommu)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	if (ctrl & (1<<CONTROL_IOMMU_EN))
		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
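/*
 * Worked example (illustrative only): with the architectural maximum
 * amd_iommu_last_bdf of 0xffff and 32-byte device table entries,
 * tbl_size(32) computes get_order(0x10000 * 32) = get_order(2 MiB) = 9,
 * so shift = PAGE_SHIFT + 9 = 21 and the function returns 1UL << 21,
 * i.e. a 2 MiB table rounded up to a whole power-of-two number of pages.
 */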
int amd_iommu_get_num_iommus(void)
{
	return amd_iommus_present;
}

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = iommu_virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
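/*
 * Example of the encoding above (illustrative only): the low bits of the
 * Device Table Base Address register hold the table size in 4K pages,
 * minus one. For a 2 MiB device table, (dev_table_size >> 12) - 1 = 0x1ff,
 * so the register value is the table's physical address OR'ed with 0x1ff.
 */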
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1ULL << bit);
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u64 ctrl;

	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	if (!iommu->mmio_base)
		return;

	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU GA_LOG */
	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
	iommu_feature_disable(iommu, CONTROL_GAINT_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("Cannot reserve memory region %llx-%llx for mmio\n",
		       address, end);
		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}

static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{
	u32 size = 0;

	switch (h->type) {
	case 0x10:
		size = 24;
		break;
	case 0x11:
	case 0x40:
		size = 40;
		break;
	}
	return size;
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	u32 type = ((struct ivhd_entry *)ivhd)->type;

	if (type < 0x80) {
		return 0x04 << (*ivhd >> 6);
	} else if (type == IVHD_DEV_ACPI_HID) {
		/* For ACPI_HID, offset 21 is uid len */
		return *((u8 *)ivhd + 21) + 22;
	}
	return 0;
}
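/*
 * Example (illustrative only): for the fixed-size entries the length is
 * encoded in the top two bits of the type byte as 4 << (type >> 6), so
 * IVHD_DEV_SELECT (0x02) is a 4-byte entry while IVHD_DEV_ALIAS (0x42)
 * and IVHD_DEV_SPECIAL (0x48) are 8-byte entries. Only the variable-length
 * IVHD_DEV_ACPI_HID (0xf0) entries need the explicit uid-length lookup.
 */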
/*
 * After reading the highest device id from the IOMMU PCI capability header,
 * this function checks whether a higher device id is defined in the ACPI
 * table.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	u32 ivhd_size = get_ivhd_header_size(h);

	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			int ret = find_last_devid_from_ivhd(h);

			if (ret)
				return ret;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	iommu->cmd_buf_head = 0;
	iommu->cmd_buf_tail = 0;

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = iommu_virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}
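/*
 * A note on the size encoding above: MMIO_CMD_SIZE_512 sets the length
 * field in the high bits of the Command Buffer Base Address register.
 * Assuming the usual 0x9ULL << 56 definition from amd_iommu_types.h, it
 * encodes 2^9 = 512 command slots of 16 bytes each, matching the 8 KiB
 * CMD_BUFFER_SIZE used for the allocation in alloc_command_buffer().
 */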
/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
#endif
}

static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;

	if (!iommu->ga_log)
		return -EINVAL;

	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Check if already running */
	if (status & (MMIO_STATUS_GALOG_RUN_MASK))
		return 0;

	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
	iommu_feature_enable(iommu, CONTROL_GALOG_EN);

	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
			break;
	}

	if (i >= LOOP_TIMEOUT)
		return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
	return 0;
}

#ifdef CONFIG_IRQ_REMAP
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		return 0;

	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(GA_LOG_SIZE));
	if (!iommu->ga_log)
		goto err_out;

	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(8));
	if (!iommu->ga_log_tail)
		goto err_out;

	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
		    &entry, sizeof(entry));
	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
		 (BIT_ULL(52)-1)) & ~7ULL;
	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
		    &entry, sizeof(entry));
	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	return 0;
err_out:
	free_ga_log(iommu);
	return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */
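/*
 * The tail-pointer masking above keeps only a valid system physical
 * address: (BIT_ULL(52) - 1) truncates the address to the 52 bits the
 * hardware supports, and ~7ULL clears the low three bits to force 8-byte
 * alignment; e.g. a tail address ending in 0xdef would be stored ending
 * in 0xde8.
 */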
static int iommu_init_ga(struct amd_iommu *iommu)
{
	int ret = 0;

#ifdef CONFIG_IRQ_REMAP
	/* Note: We have already checked GASup from IVRS table.
	 *       Now, we need to make sure that GAMSup is set.
	 */
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !iommu_feature(iommu, FEATURE_GAM_VAPIC))
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;

	ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */

	return ret;
}

static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	/*
	 * XT mode (32-bit APIC destination ID) requires
	 * GA mode (128-bit IRTE support) as a prerequisite.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_XT_EN);
#endif /* CONFIG_IRQ_REMAP */
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
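/*
 * Example of the indexing above (illustrative only): a DTE is 256 bits
 * stored as four u64s, so bit number B lives in data[B >> 6] at bit
 * position B & 0x3f. E.g. bit 0x00 is data[0] bit 0, bit 0x61 is
 * data[1] bit 33, and bit 0x80 is data[2] bit 0.
 */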
static bool copy_device_table(void)
{
	u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
	struct dev_table_entry *old_devtb = NULL;
	u32 lo, hi, devid, old_devtb_size;
	phys_addr_t old_devtb_phys;
	struct amd_iommu *iommu;
	u16 dom_id, dte_v, irq_v;
	gfp_t gfp_flag;
	u64 tmp;

	if (!amd_iommu_pre_enabled)
		return false;

	pr_warn("Translation is already enabled - trying to copy translation structures\n");
	for_each_iommu(iommu) {
		/* All IOMMUs should use the same device table with the same size */
		lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
		hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
		entry = (((u64) hi) << 32) + lo;
		if (last_entry && last_entry != entry) {
			pr_err("IOMMU:%d should use the same dev table as others!\n",
			       iommu->index);
			return false;
		}
		last_entry = entry;

		old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
		if (old_devtb_size != dev_table_size) {
			pr_err("The device table size of IOMMU:%d is not expected!\n",
			       iommu->index);
			return false;
		}
	}

	/*
	 * When SME is enabled in the first kernel, the entry includes the
	 * memory encryption mask (sme_me_mask); we must remove the memory
	 * encryption mask to obtain the true physical address in the kdump
	 * kernel.
	 */
	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;

	if (old_devtb_phys >= 0x100000000ULL) {
		pr_err("The address of old device table is above 4G, not trustworthy!\n");
		return false;
	}
	old_devtb = (sme_active() && is_kdump_kernel())
		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
							dev_table_size)
		    : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);

	if (!old_devtb)
		return false;

	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
	old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
				get_order(dev_table_size));
	if (old_dev_tbl_cpy == NULL) {
		pr_err("Failed to allocate memory for copying old device table!\n");
		return false;
	}

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		old_dev_tbl_cpy[devid] = old_devtb[devid];
		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;

		if (dte_v && dom_id) {
			old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
			old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
			__set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
			/* If gcr3 table existed, mask it out */
			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
				tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
				tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
				old_dev_tbl_cpy[devid].data[1] &= ~tmp;
				tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
				tmp |= DTE_FLAG_GV;
				old_dev_tbl_cpy[devid].data[0] &= ~tmp;
			}
		}

		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
		int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
		if (irq_v && (int_ctl || int_tab_len)) {
			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
			    (int_tab_len != DTE_IRQ_TABLE_LEN)) {
				pr_err("Wrong old irq remapping flag: %#x\n", devid);
				return false;
			}

			old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
		}
	}
	memunmap(old_devtb);

	return true;
}
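/*
 * For reference, the size decoding used above mirrors the encoding in
 * iommu_set_device_table(): the low bits of the base-address register
 * hold (size in 4K pages) - 1, so ((entry & ~PAGE_MASK) + 1) << 12
 * recovers the byte size; e.g. low bits of 0x1ff decode to 2 MiB.
 */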
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
				      bool cmd_line)
{
	struct acpihid_map_entry *entry;
	struct list_head *list = &acpihid_map;

	list_for_each_entry(entry, list, list) {
		if (strcmp(entry->hid, hid) ||
		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
		    !entry->cmd_line)
			continue;

		pr_info("Command-line override for hid:%s uid:%s\n",
			hid, uid);
		*devid = entry->devid;
		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	memcpy(entry->uid, uid, strlen(uid));
	memcpy(entry->hid, hid, strlen(hid));
	entry->devid = *devid;
	entry->cmd_line	= cmd_line;
	entry->root_devid = (entry->devid & (~0x7));

	pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
		entry->cmd_line ? "cmd" : "ivrs",
		entry->hid, entry->uid, entry->root_devid);

	list_add_tail(&entry->list, list);
	return 0;
}

static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_acpihid_map_size; ++i) {
		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
					  early_acpihid_map[i].uid,
					  &early_acpihid_map[i].devid,
					  early_acpihid_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * since some buggy BIOSes may overwrite the exclusion range
	 * (exclusion_start and exclusion_length members) when multiple
	 * exclusion ranges (IVMD entries) are defined in the ACPI table.
	 */
	m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	u32 ivhd_size;
	int ret;


	ret = add_early_maps();
	if (ret)
		return ret;

	amd_iommu_apply_ivrs_quirks();

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	ivhd_size = get_ivhd_header_size(h);
	if (!ivhd_size) {
		pr_err("Unsupported IVHD type %#x\n", h->type);
		return -EINVAL;
	}

	p += ivhd_size;

	end += h->length;


	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		case IVHD_DEV_ACPI_HID: {
			u16 devid;
			u8 hid[ACPIHID_HID_LEN];
			u8 uid[ACPIHID_UID_LEN];
			int ret;

			if (h->type != 0x40) {
				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
				       e->type);
				break;
			}

			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
			hid[ACPIHID_HID_LEN - 1] = '\0';

			if (!(*hid)) {
				pr_err(FW_BUG "Invalid HID.\n");
				break;
			}

			uid[0] = '\0';
			switch (e->uidf) {
			case UID_NOT_PRESENT:

				if (e->uidl != 0)
					pr_warn(FW_BUG "Invalid UID length.\n");

				break;
			case UID_IS_INTEGER:

				sprintf(uid, "%d", e->uid);

				break;
			case UID_IS_CHARACTER:

				memcpy(uid, &e->uid, e->uidl);
				uid[e->uidl] = '\0';

				break;
			default:
				break;
			}

			devid = e->devid;
			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
				    hid, uid,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			flags = e->flags;

			ret = add_acpi_hid_device(hid, uid, &devid, false);
			if (ret)
				return ret;
			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	free_ga_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pci_info(iommu->dev, "Applying erratum 746 workaround\n");

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pci_info(iommu->dev, "Applying ATS write check workaround\n");
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
		if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		/*
		 * Note: Since iommu_update_intcapxt() leverages
		 * the IOMMU MMIO access to MSI capability block registers
		 * for MSI address lo/hi/data, we need to check both
		 * EFR[XtSup] and EFR[MsiCapMmioSup] for x2APIC support.
		 */
		if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
		    (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
		break;
	default:
		return -EINVAL;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;
	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function searches through all IVHD blocks in the IVRS table and
 * returns the highest supported IVHD type it finds.
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{
	u8 *base = (u8 *)ivrs;
	struct ivhd_header *ivhd = (struct ivhd_header *)
					(base + IVRS_HEADER_LENGTH);
	u8 last_type = ivhd->type;
	u16 devid = ivhd->devid;

	while (((u8 *)ivhd - base < ivrs->length) &&
	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
		u8 *p = (u8 *) ivhd;

		if (ivhd->devid == devid)
			last_type = ivhd->type;
		ivhd = (struct ivhd_header *)(p + ivhd->length);
	}

	return last_type;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (*p == amd_iommu_target_ivhd_type) {

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;

	}
	WARN_ON(p != end);

	return 0;
}

static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write);

static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	struct pci_dev *pdev = iommu->dev;
	u64 val = 0xabcd, val2 = 0, save_reg = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* save the value to restore, if writable */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
		goto pc_false;

	/* Check if the performance counters can be written to */
	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
	    (val != val2))
		goto pc_false;

	/* restore */
	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
		goto pc_false;

	pci_info(pdev, "IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);

	return;

pc_false:
	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
	amd_iommu_pc_present = false;
	return;
}

static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

static int __init iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	int ret;

	iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
						 iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}
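	/*
	 * Worked example (illustrative only): the PASmax field holds the
	 * PASID width minus one, so a field value of 15 gives
	 * max_pasid = (1 << 16) - 1 = 0xffff, i.e. 16-bit PASIDs.
	 */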
	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	ret = iommu_init_ga(iommu);
	if (ret)
		return ret;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev =
			pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
						    PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
			       amd_iommu_groups, "ivhd%d", iommu->index);
	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
	iommu_device_register(&iommu->iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		struct pci_dev *pdev = iommu->dev;
		int i;

		pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pci_info(pdev, "Extended features (%#llx):",
				 iommu->features);
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}

			if (iommu->features & FEATURE_GAM_VAPIC)
				pr_cont(" GA_vAPIC");

			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled) {
		pr_info("Interrupt remapping enabled\n");
		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
			pr_info("Virtual APIC enabled\n");
		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
			pr_info("X2APIC enabled\n");
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	/*
	 * Order is important here to make sure any unity map requirements are
	 * fulfilled. The unity mappings are created and written to the device
	 * table during the amd_iommu_init_api() call.
	 *
	 * After that we call init_device_table_dma() to make sure any
	 * uninitialized DTE will block DMA, and in the end we flush the caches
	 * of all IOMMUs to make sure the changes to the device table are
	 * active.
	 */
	ret = amd_iommu_init_api();

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	if (!ret)
		print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

#define XT_INT_DEST_MODE(x)	(((x) & 0x1ULL) << 2)
#define XT_INT_DEST_LO(x)	(((x) & 0xFFFFFFULL) << 8)
#define XT_INT_VEC(x)		(((x) & 0xFFULL) << 32)
#define XT_INT_DEST_HI(x)	((((x) >> 24) & 0xFFULL) << 56)
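/*
 * Example of the layout built from these macros (illustrative only):
 * for vector 0x41 and a physical destination APIC ID of 0x12,
 * XT_INT_VEC(0x41) | XT_INT_DEST_MODE(0) | XT_INT_DEST_LO(0x12) |
 * XT_INT_DEST_HI(0x12) yields 0x0000004100001200: the vector in bits
 * 39:32, destination bits 23:0 in bits 31:8, destination bits 31:24 in
 * bits 63:56, and the destination-mode flag in bit 2.
 */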
/*
 * Setup the IntCapXT registers with interrupt routing information
 * based on the PCI MSI capability block registers, accessed via
 * MMIO MSI address low/hi and MSI data registers.
 */
static void iommu_update_intcapxt(struct amd_iommu *iommu)
{
	u64 val;
	u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
	u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
	u32 data    = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
	bool dm     = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
	u32 dest    = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);

	if (x2apic_enabled())
		dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);

	val = XT_INT_VEC(data & 0xFF) |
	      XT_INT_DEST_MODE(dm) |
	      XT_INT_DEST_LO(dest) |
	      XT_INT_DEST_HI(dest);

	/*
	 * Current IOMMU implementation uses the same IRQ for all
	 * 3 IOMMU interrupts.
	 */
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
	writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
}

static void _irq_notifier_notify(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		if (iommu->dev->irq == notify->irq) {
			iommu_update_intcapxt(iommu);
			break;
		}
	}
}

static void _irq_notifier_release(struct kref *ref)
{
}

static int iommu_init_intcapxt(struct amd_iommu *iommu)
{
	int ret;
	struct irq_affinity_notify *notify = &iommu->intcapxt_notify;

	/*
	 * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
	 * which can be inferred from amd_iommu_xt_mode.
	 */
	if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
		return 0;

	/*
	 * Also, we need to setup notifier to update the IntCapXT registers
	 * whenever the irq affinity is changed from user-space.
	 */
	notify->irq = iommu->dev->irq;
	notify->notify = _irq_notifier_notify;
	notify->release = _irq_notifier_release;
	ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
	if (ret) {
		pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
		       iommu->devid, iommu->dev->irq);
		return ret;
	}

	iommu_update_intcapxt(iommu);
	iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
	return ret;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	ret = iommu_init_intcapxt(iommu);
	if (ret)
		return ret;

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	iommu_ga_log_enable(iommu);

	return 0;
}
2046 *
2047 ****************************************************************************/
2048
2049 static void __init free_unity_maps(void)
2050 {
2051 struct unity_map_entry *entry, *next;
2052
2053 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2054 list_del(&entry->list);
2055 kfree(entry);
2056 }
2057 }
2058
2059 /* called when we find an exclusion range definition in ACPI */
2060 static int __init init_exclusion_range(struct ivmd_header *m)
2061 {
2062 int i;
2063
2064 switch (m->type) {
2065 case ACPI_IVMD_TYPE:
2066 set_device_exclusion_range(m->devid, m);
2067 break;
2068 case ACPI_IVMD_TYPE_ALL:
2069 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2070 set_device_exclusion_range(i, m);
2071 break;
2072 case ACPI_IVMD_TYPE_RANGE:
2073 for (i = m->devid; i <= m->aux; ++i)
2074 set_device_exclusion_range(i, m);
2075 break;
2076 default:
2077 break;
2078 }
2079
2080 return 0;
2081 }
2082
2083 /* called for unity map ACPI definition */
2084 static int __init init_unity_map_range(struct ivmd_header *m)
2085 {
2086 struct unity_map_entry *e = NULL;
2087 char *s;
2088
2089 e = kzalloc(sizeof(*e), GFP_KERNEL);
2090 if (e == NULL)
2091 return -ENOMEM;
2092
2093 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2094 init_exclusion_range(m);
2095
2096 switch (m->type) {
2097 default:
2098 kfree(e);
2099 return 0;
2100 case ACPI_IVMD_TYPE:
2101 s = "IVMD_TYPE\t\t\t";
2102 e->devid_start = e->devid_end = m->devid;
2103 break;
2104 case ACPI_IVMD_TYPE_ALL:
2105 s = "IVMD_TYPE_ALL\t\t";
2106 e->devid_start = 0;
2107 e->devid_end = amd_iommu_last_bdf;
2108 break;
2109 case ACPI_IVMD_TYPE_RANGE:
2110 s = "IVMD_TYPE_RANGE\t\t";
2111 e->devid_start = m->devid;
2112 e->devid_end = m->aux;
2113 break;
2114 }
2115 e->address_start = PAGE_ALIGN(m->range_start);
2116 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2117 e->prot = m->flags >> 1;
2118
2119 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2120 " range_start: %016llx range_end: %016llx flags: %x\n", s,
2121 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2122 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2123 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2124 e->address_start, e->address_end, m->flags);
2125
2126 list_add_tail(&e->list, &amd_iommu_unity_map);
2127
2128 return 0;
2129 }
2130
2131 /* iterates over all memory definitions we find in the ACPI table */
2132 static int __init init_memory_definitions(struct acpi_table_header *table)
2133 {
2134 u8 *p = (u8 *)table, *end = (u8 *)table;
2135 struct ivmd_header *m;
2136
2137 end += table->length;
2138 p += IVRS_HEADER_LENGTH;
2139
2140 while (p < end) {
2141 m = (struct ivmd_header *)p;
2142 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2143 init_unity_map_range(m);
2144
2145 p += m->length;
2146 }
2147
2148 return 0;
2149 }
2150
2151 /*
2152 * Init the device table to not allow DMA access for devices
2153 */
2154 static void init_device_table_dma(void)
2155 {
2156 u32 devid;
2157
2158 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2159 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2160 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2161 }
2162 }
2163
2164 static void __init uninit_device_table_dma(void)
2165 {
2166 u32 devid;
2167
2168 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2169 amd_iommu_dev_table[devid].data[0] = 0ULL;
2170 amd_iommu_dev_table[devid].data[1] = 0ULL;
2171 }
2172 }
2173
2174 static void init_device_table(void)
2175 {
2176 u32 devid;
2177
2178 if (!amd_iommu_irq_remap)
2179
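/* without interrupt remapping there is nothing to pre-set here */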
return; 2180 2181 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) 2182 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); 2183 } 2184 2185 static void iommu_init_flags(struct amd_iommu *iommu) 2186 { 2187 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 2188 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 2189 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 2190 2191 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 2192 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 2193 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 2194 2195 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 2196 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 2197 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 2198 2199 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 2200 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 2201 iommu_feature_disable(iommu, CONTROL_ISOC_EN); 2202 2203 /* 2204 * make IOMMU memory accesses cache coherent 2205 */ 2206 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 2207 2208 /* Set IOTLB invalidation timeout to 1s */ 2209 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); 2210 } 2211 2212 static void iommu_apply_resume_quirks(struct amd_iommu *iommu) 2213 { 2214 int i, j; 2215 u32 ioc_feature_control; 2216 struct pci_dev *pdev = iommu->root_pdev; 2217 2218 /* RD890 BIOSes may not have completely reconfigured the iommu */ 2219 if (!is_rd890_iommu(iommu->dev) || !pdev) 2220 return; 2221 2222 /* 2223 * First, we need to ensure that the iommu is enabled. This is 2224 * controlled by a register in the northbridge 2225 */ 2226 2227 /* Select Northbridge indirect register 0x75 and enable writing */ 2228 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); 2229 pci_read_config_dword(pdev, 0x64, &ioc_feature_control); 2230 2231 /* Enable the iommu */ 2232 if (!(ioc_feature_control & 0x1)) 2233 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 2234 2235 /* Restore the iommu BAR */ 2236 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 2237 iommu->stored_addr_lo); 2238 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, 2239 iommu->stored_addr_hi); 2240 2241 /* Restore the l1 indirect regs for each of the 6 l1s */ 2242 for (i = 0; i < 6; i++) 2243 for (j = 0; j < 0x12; j++) 2244 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); 2245 2246 /* Restore the l2 indirect regs */ 2247 for (i = 0; i < 0x83; i++) 2248 iommu_write_l2(iommu, i, iommu->stored_l2[i]); 2249 2250 /* Lock PCI setup registers */ 2251 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 2252 iommu->stored_addr_lo | 1); 2253 } 2254 2255 static void iommu_enable_ga(struct amd_iommu *iommu) 2256 { 2257 #ifdef CONFIG_IRQ_REMAP 2258 switch (amd_iommu_guest_ir) { 2259 case AMD_IOMMU_GUEST_IR_VAPIC: 2260 iommu_feature_enable(iommu, CONTROL_GAM_EN); 2261 /* Fall through */ 2262 case AMD_IOMMU_GUEST_IR_LEGACY_GA: 2263 iommu_feature_enable(iommu, CONTROL_GA_EN); 2264 iommu->irte_ops = &irte_128_ops; 2265 break; 2266 default: 2267 iommu->irte_ops = &irte_32_ops; 2268 break; 2269 } 2270 #endif 2271 } 2272 2273 static void early_enable_iommu(struct amd_iommu *iommu) 2274 { 2275 iommu_disable(iommu); 2276 iommu_init_flags(iommu); 2277 iommu_set_device_table(iommu); 2278 iommu_enable_command_buffer(iommu); 2279 iommu_enable_event_buffer(iommu); 2280 iommu_set_exclusion_range(iommu); 2281 iommu_enable_ga(iommu); 2282 iommu_enable_xt(iommu); 2283 iommu_enable(iommu); 2284 iommu_flush_all_caches(iommu); 2285 } 2286 2287 /* 2288 * This function finally enables all IOMMUs found in the system after 2289 * they have been 
initialized.
2290 *
2291 * Or, if in a kdump kernel with all IOMMUs pre-enabled, try to copy
2292 * the old content of the device table entries. If that is not the case,
2293 * or if the copy fails, just continue as a normal kernel would.
2294 */
2295 static void early_enable_iommus(void)
2296 {
2297 struct amd_iommu *iommu;
2298
2299
2300 if (!copy_device_table()) {
2301 /*
2302 * We get here on failure to copy the device table from the
2303 * old kernel with all IOMMUs enabled; print an error message
2304 * and try to free the allocated old_dev_tbl_cpy.
2305 */
2306 if (amd_iommu_pre_enabled)
2307 pr_err("Failed to copy DEV table from previous kernel.\n");
2308 if (old_dev_tbl_cpy != NULL)
2309 free_pages((unsigned long)old_dev_tbl_cpy,
2310 get_order(dev_table_size));
2311
2312 for_each_iommu(iommu) {
2313 clear_translation_pre_enabled(iommu);
2314 early_enable_iommu(iommu);
2315 }
2316 } else {
2317 pr_info("Copied DEV table from previous kernel.\n");
2318 free_pages((unsigned long)amd_iommu_dev_table,
2319 get_order(dev_table_size));
2320 amd_iommu_dev_table = old_dev_tbl_cpy;
2321 for_each_iommu(iommu) {
2322 iommu_disable_command_buffer(iommu);
2323 iommu_disable_event_buffer(iommu);
2324 iommu_enable_command_buffer(iommu);
2325 iommu_enable_event_buffer(iommu);
2326 iommu_enable_ga(iommu);
2327 iommu_enable_xt(iommu);
2328 iommu_set_device_table(iommu);
2329 iommu_flush_all_caches(iommu);
2330 }
2331 }
2332
2333 #ifdef CONFIG_IRQ_REMAP
2334 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2335 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2336 #endif
2337 }
2338
2339 static void enable_iommus_v2(void)
2340 {
2341 struct amd_iommu *iommu;
2342
2343 for_each_iommu(iommu) {
2344 iommu_enable_ppr_log(iommu);
2345 iommu_enable_gt(iommu);
2346 }
2347 }
2348
2349 static void enable_iommus(void)
2350 {
2351 early_enable_iommus();
2352
2353 enable_iommus_v2();
2354 }
2355
2356 static void disable_iommus(void)
2357 {
2358 struct amd_iommu *iommu;
2359
2360 for_each_iommu(iommu)
2361 iommu_disable(iommu);
2362
2363 #ifdef CONFIG_IRQ_REMAP
2364 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2365 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2366 #endif
2367 }
2368
2369 /*
2370 * Suspend/Resume support
2371 * disable suspend until real resume implemented
2372 */
2373
2374 static void amd_iommu_resume(void)
2375 {
2376 struct amd_iommu *iommu;
2377
2378 for_each_iommu(iommu)
2379 iommu_apply_resume_quirks(iommu);
2380
2381 /* re-load the hardware */
2382 enable_iommus();
2383
2384 amd_iommu_enable_interrupts();
2385 }
2386
2387 static int amd_iommu_suspend(void)
2388 {
2389 /* disable IOMMUs to go out of the way for BIOS */
2390 disable_iommus();
2391
2392 return 0;
2393 }
2394
2395 static struct syscore_ops amd_iommu_syscore_ops = {
2396 .suspend = amd_iommu_suspend,
2397 .resume = amd_iommu_resume,
2398 };
2399
2400 static void __init free_iommu_resources(void)
2401 {
2402 kmemleak_free(irq_lookup_table);
2403 free_pages((unsigned long)irq_lookup_table,
2404 get_order(rlookup_table_size));
2405 irq_lookup_table = NULL;
2406
2407 kmem_cache_destroy(amd_iommu_irq_cache);
2408 amd_iommu_irq_cache = NULL;
2409
2410 free_pages((unsigned long)amd_iommu_rlookup_table,
2411 get_order(rlookup_table_size));
2412 amd_iommu_rlookup_table = NULL;
2413
2414 free_pages((unsigned long)amd_iommu_alias_table,
2415 get_order(alias_table_size));
2416 amd_iommu_alias_table = NULL;
2417
2418 free_pages((unsigned long)amd_iommu_dev_table,
2419 get_order(dev_table_size));
2420 amd_iommu_dev_table = NULL;
2421
2422
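/* finally, release the per-IOMMU structures themselves */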
free_iommu_all();
2423 }
2424
2425 /* SB IOAPIC is always on this device in AMD systems */
2426 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
2427
2428 static bool __init check_ioapic_information(void)
2429 {
2430 const char *fw_bug = FW_BUG;
2431 bool ret, has_sb_ioapic;
2432 int idx;
2433
2434 has_sb_ioapic = false;
2435 ret = false;
2436
2437 /*
2438 * If we have map overrides on the kernel command line, the
2439 * messages in this function might not describe firmware bugs
2440 * anymore - so be careful
2441 */
2442 if (cmdline_maps)
2443 fw_bug = "";
2444
2445 for (idx = 0; idx < nr_ioapics; idx++) {
2446 int devid, id = mpc_ioapic_id(idx);
2447
2448 devid = get_ioapic_devid(id);
2449 if (devid < 0) {
2450 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2451 fw_bug, id);
2452 ret = false;
2453 } else if (devid == IOAPIC_SB_DEVID) {
2454 has_sb_ioapic = true;
2455 ret = true;
2456 }
2457 }
2458
2459 if (!has_sb_ioapic) {
2460 /*
2461 * We expect the SB IOAPIC to be listed in the IVRS
2462 * table. The system timer is connected to the SB IOAPIC
2463 * and if we don't have it in the list the system will
2464 * panic at boot time. This situation usually happens
2465 * when the BIOS is buggy and provides us the wrong
2466 * device id for the IOAPIC in the system.
2467 */
2468 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2469 }
2470
2471 if (!ret)
2472 pr_err("Disabling interrupt remapping\n");
2473
2474 return ret;
2475 }
2476
2477 static void __init free_dma_resources(void)
2478 {
2479 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2480 get_order(MAX_DOMAIN_ID/8));
2481 amd_iommu_pd_alloc_bitmap = NULL;
2482
2483 free_unity_maps();
2484 }
2485
2486 /*
2487 * This is the hardware init function for AMD IOMMU in the system.
2488 * This function is called either from amd_iommu_init or from the interrupt
2489 * remapping setup code.
2490 *
2491 * This function parses the ACPI table for the AMD IOMMU (IVRS)
2492 * four times:
2493 *
2494 * Pass 1) Discover the most comprehensive IVHD type to use.
2495 *
2496 * Pass 2) Find the highest PCI device id the driver has to handle.
2497 * Based on this information, the sizes of the data structures
2498 * that need to be allocated are determined.
2499 *
2500 * Pass 3) Initialize the data structures just allocated with the
2501 * information in the ACPI table about available AMD IOMMUs
2502 * in the system. It also maps the PCI devices in the
2503 * system to specific IOMMUs.
2504 *
2505 * Pass 4) After the basic data structures are allocated and
2506 * initialized, we update them with information about memory
2507 * remapping requirements parsed out of the ACPI table in
2508 * this last pass.
2509 *
2510 * After everything is set up the IOMMUs are enabled and the necessary
2511 * hotplug and suspend notifiers are registered.
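 *
 * Rough sizing illustration: with the maximum amd_iommu_last_bdf of
 * 0xFFFF there are 65536 device ids, so the device table needs 65536
 * entries of DEV_TABLE_ENTRY_SIZE (32) bytes each, i.e. 2 MiB; the
 * alias and rlookup tables scale the same way with their smaller
 * per-entry sizes.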
2512 */
2513 static int __init early_amd_iommu_init(void)
2514 {
2515 struct acpi_table_header *ivrs_base;
2516 acpi_status status;
2517 int i, remap_cache_sz, ret = 0;
2518 u32 pci_id;
2519
2520 if (!amd_iommu_detected)
2521 return -ENODEV;
2522
2523 status = acpi_get_table("IVRS", 0, &ivrs_base);
2524 if (status == AE_NOT_FOUND)
2525 return -ENODEV;
2526 else if (ACPI_FAILURE(status)) {
2527 const char *err = acpi_format_exception(status);
2528 pr_err("IVRS table error: %s\n", err);
2529 return -EINVAL;
2530 }
2531
2532 /*
2533 * Validate the checksum here so we don't need to do it when
2534 * we actually parse the table
2535 */
2536 ret = check_ivrs_checksum(ivrs_base);
2537 if (ret)
2538 goto out;
2539
2540 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2541 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2542
2543 /*
2544 * First parse the ACPI tables to find the largest Bus/Dev/Func
2545 * we need to handle. Based on this information, the shared data
2546 * structures for the IOMMUs in the system will be allocated.
2547 */
2548 ret = find_last_devid_acpi(ivrs_base);
2549 if (ret)
2550 goto out;
2551
2552 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2553 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2554 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2555
2556 /* Device table - directly used by all IOMMUs */
2557 ret = -ENOMEM;
2558 amd_iommu_dev_table = (void *)__get_free_pages(
2559 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2560 get_order(dev_table_size));
2561 if (amd_iommu_dev_table == NULL)
2562 goto out;
2563
2564 /*
2565 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
2566 * IOMMU sees for that device
2567 */
2568 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2569 get_order(alias_table_size));
2570 if (amd_iommu_alias_table == NULL)
2571 goto out;
2572
2573 /* IOMMU rlookup table - find the IOMMU for a specific device */
2574 amd_iommu_rlookup_table = (void *)__get_free_pages(
2575 GFP_KERNEL | __GFP_ZERO,
2576 get_order(rlookup_table_size));
2577 if (amd_iommu_rlookup_table == NULL)
2578 goto out;
2579
2580 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2581 GFP_KERNEL | __GFP_ZERO,
2582 get_order(MAX_DOMAIN_ID/8));
2583 if (amd_iommu_pd_alloc_bitmap == NULL)
2584 goto out;
2585
2586 /*
2587 * let all alias entries point to themselves
2588 */
2589 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2590 amd_iommu_alias_table[i] = i;
2591
2592 /*
2593 * never allocate domain 0 because it's used as the non-allocated and
2594 * error value placeholder
2595 */
2596 __set_bit(0, amd_iommu_pd_alloc_bitmap);
2597
2598 /*
2599 * now that the data structures are allocated and basically initialized,
2600 * start the real acpi table scan
2601 */
2602 ret = init_iommu_all(ivrs_base);
2603 if (ret)
2604 goto out;
2605
2606 /* Disable IOMMU if there's Stoney Ridge graphics */
2607 for (i = 0; i < 32; i++) {
2608 pci_id = read_pci_config(0, i, 0, 0);
2609 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2610 pr_info("Disable IOMMU on Stoney Ridge\n");
2611 amd_iommu_disabled = true;
2612 break;
2613 }
2614 }
2615
2616 /* Disable any previously enabled IOMMUs */
2617 if (!is_kdump_kernel() || amd_iommu_disabled)
2618 disable_iommus();
2619
2620 if (amd_iommu_irq_remap)
2621 amd_iommu_irq_remap = check_ioapic_information();
2622
2623 if (amd_iommu_irq_remap) {
2624 /*
2625 * Interrupt remapping enabled, create kmem_cache for the
2626 * remapping tables.
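 * The per-entry size, and thus remap_cache_sz below, depends on the
 * IRTE format: plain/legacy mode uses 4-byte (sizeof(u32)) entries,
 * while GA mode uses 128-bit entries (2 * sizeof(u64)), with
 * MAX_IRQS_PER_TABLE entries per table in both cases.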
2627 */ 2628 ret = -ENOMEM; 2629 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 2630 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); 2631 else 2632 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); 2633 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", 2634 remap_cache_sz, 2635 IRQ_TABLE_ALIGNMENT, 2636 0, NULL); 2637 if (!amd_iommu_irq_cache) 2638 goto out; 2639 2640 irq_lookup_table = (void *)__get_free_pages( 2641 GFP_KERNEL | __GFP_ZERO, 2642 get_order(rlookup_table_size)); 2643 kmemleak_alloc(irq_lookup_table, rlookup_table_size, 2644 1, GFP_KERNEL); 2645 if (!irq_lookup_table) 2646 goto out; 2647 } 2648 2649 ret = init_memory_definitions(ivrs_base); 2650 if (ret) 2651 goto out; 2652 2653 /* init the device table */ 2654 init_device_table(); 2655 2656 out: 2657 /* Don't leak any ACPI memory */ 2658 acpi_put_table(ivrs_base); 2659 ivrs_base = NULL; 2660 2661 return ret; 2662 } 2663 2664 static int amd_iommu_enable_interrupts(void) 2665 { 2666 struct amd_iommu *iommu; 2667 int ret = 0; 2668 2669 for_each_iommu(iommu) { 2670 ret = iommu_init_msi(iommu); 2671 if (ret) 2672 goto out; 2673 } 2674 2675 out: 2676 return ret; 2677 } 2678 2679 static bool detect_ivrs(void) 2680 { 2681 struct acpi_table_header *ivrs_base; 2682 acpi_status status; 2683 2684 status = acpi_get_table("IVRS", 0, &ivrs_base); 2685 if (status == AE_NOT_FOUND) 2686 return false; 2687 else if (ACPI_FAILURE(status)) { 2688 const char *err = acpi_format_exception(status); 2689 pr_err("IVRS table error: %s\n", err); 2690 return false; 2691 } 2692 2693 acpi_put_table(ivrs_base); 2694 2695 /* Make sure ACS will be enabled during PCI probe */ 2696 pci_request_acs(); 2697 2698 return true; 2699 } 2700 2701 /**************************************************************************** 2702 * 2703 * AMD IOMMU Initialization State Machine 2704 * 2705 ****************************************************************************/ 2706 2707 static int __init state_next(void) 2708 { 2709 int ret = 0; 2710 2711 switch (init_state) { 2712 case IOMMU_START_STATE: 2713 if (!detect_ivrs()) { 2714 init_state = IOMMU_NOT_FOUND; 2715 ret = -ENODEV; 2716 } else { 2717 init_state = IOMMU_IVRS_DETECTED; 2718 } 2719 break; 2720 case IOMMU_IVRS_DETECTED: 2721 ret = early_amd_iommu_init(); 2722 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 2723 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { 2724 pr_info("AMD IOMMU disabled\n"); 2725 init_state = IOMMU_CMDLINE_DISABLED; 2726 ret = -EINVAL; 2727 } 2728 break; 2729 case IOMMU_ACPI_FINISHED: 2730 early_enable_iommus(); 2731 x86_platform.iommu_shutdown = disable_iommus; 2732 init_state = IOMMU_ENABLED; 2733 break; 2734 case IOMMU_ENABLED: 2735 register_syscore_ops(&amd_iommu_syscore_ops); 2736 ret = amd_iommu_init_pci(); 2737 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 2738 enable_iommus_v2(); 2739 break; 2740 case IOMMU_PCI_INIT: 2741 ret = amd_iommu_enable_interrupts(); 2742 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2743 break; 2744 case IOMMU_INTERRUPTS_EN: 2745 ret = amd_iommu_init_dma_ops(); 2746 init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2747 break; 2748 case IOMMU_DMA_OPS: 2749 init_state = IOMMU_INITIALIZED; 2750 break; 2751 case IOMMU_INITIALIZED: 2752 /* Nothing to do */ 2753 break; 2754 case IOMMU_NOT_FOUND: 2755 case IOMMU_INIT_ERROR: 2756 case IOMMU_CMDLINE_DISABLED: 2757 /* Error states => do nothing */ 2758 ret = -EINVAL; 2759 break; 2760 default: 2761 /* Unknown state */ 2762 BUG(); 2763 } 2764 2765 if (ret) { 2766 free_dma_resources(); 2767 if (!irq_remapping_enabled) { 2768 disable_iommus(); 2769 free_iommu_resources(); 2770 } else { 2771 struct amd_iommu *iommu; 2772 2773 uninit_device_table_dma(); 2774 for_each_iommu(iommu) 2775 iommu_flush_all_caches(iommu); 2776 } 2777 } 2778 return ret; 2779 } 2780 2781 static int __init iommu_go_to_state(enum iommu_init_state state) 2782 { 2783 int ret = -EINVAL; 2784 2785 while (init_state != state) { 2786 if (init_state == IOMMU_NOT_FOUND || 2787 init_state == IOMMU_INIT_ERROR || 2788 init_state == IOMMU_CMDLINE_DISABLED) 2789 break; 2790 ret = state_next(); 2791 } 2792 2793 return ret; 2794 } 2795 2796 #ifdef CONFIG_IRQ_REMAP 2797 int __init amd_iommu_prepare(void) 2798 { 2799 int ret; 2800 2801 amd_iommu_irq_remap = true; 2802 2803 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 2804 if (ret) 2805 return ret; 2806 return amd_iommu_irq_remap ? 0 : -ENODEV; 2807 } 2808 2809 int __init amd_iommu_enable(void) 2810 { 2811 int ret; 2812 2813 ret = iommu_go_to_state(IOMMU_ENABLED); 2814 if (ret) 2815 return ret; 2816 2817 irq_remapping_enabled = 1; 2818 return amd_iommu_xt_mode; 2819 } 2820 2821 void amd_iommu_disable(void) 2822 { 2823 amd_iommu_suspend(); 2824 } 2825 2826 int amd_iommu_reenable(int mode) 2827 { 2828 amd_iommu_resume(); 2829 2830 return 0; 2831 } 2832 2833 int __init amd_iommu_enable_faulting(void) 2834 { 2835 /* We enable MSI later when PCI is initialized */ 2836 return 0; 2837 } 2838 #endif 2839 2840 /* 2841 * This is the core init function for AMD IOMMU hardware in the system. 2842 * This function is called from the generic x86 DMA layer initialization 2843 * code. 2844 */ 2845 static int __init amd_iommu_init(void) 2846 { 2847 struct amd_iommu *iommu; 2848 int ret; 2849 2850 ret = iommu_go_to_state(IOMMU_INITIALIZED); 2851 #ifdef CONFIG_GART_IOMMU 2852 if (ret && list_empty(&amd_iommu_list)) { 2853 /* 2854 * We failed to initialize the AMD IOMMU - try fallback 2855 * to GART if possible. 2856 */ 2857 gart_iommu_init(); 2858 } 2859 #endif 2860 2861 for_each_iommu(iommu) 2862 amd_iommu_debugfs_setup(iommu); 2863 2864 return ret; 2865 } 2866 2867 static bool amd_iommu_sme_check(void) 2868 { 2869 if (!sme_active() || (boot_cpu_data.x86 != 0x17)) 2870 return true; 2871 2872 /* For Fam17h, a specific level of support is required */ 2873 if (boot_cpu_data.microcode >= 0x08001205) 2874 return true; 2875 2876 if ((boot_cpu_data.microcode >= 0x08001126) && 2877 (boot_cpu_data.microcode <= 0x080011ff)) 2878 return true; 2879 2880 pr_notice("IOMMU not currently supported when SME is active\n"); 2881 2882 return false; 2883 } 2884 2885 /**************************************************************************** 2886 * 2887 * Early detect code. This code runs at IOMMU detection time in the DMA 2888 * layer. 
It just looks if there is an IVRS ACPI table to detect AMD 2889 * IOMMUs 2890 * 2891 ****************************************************************************/ 2892 int __init amd_iommu_detect(void) 2893 { 2894 int ret; 2895 2896 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 2897 return -ENODEV; 2898 2899 if (!amd_iommu_sme_check()) 2900 return -ENODEV; 2901 2902 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 2903 if (ret) 2904 return ret; 2905 2906 amd_iommu_detected = true; 2907 iommu_detected = 1; 2908 x86_init.iommu.iommu_init = amd_iommu_init; 2909 2910 return 1; 2911 } 2912 2913 /**************************************************************************** 2914 * 2915 * Parsing functions for the AMD IOMMU specific kernel command line 2916 * options. 2917 * 2918 ****************************************************************************/ 2919 2920 static int __init parse_amd_iommu_dump(char *str) 2921 { 2922 amd_iommu_dump = true; 2923 2924 return 1; 2925 } 2926 2927 static int __init parse_amd_iommu_intr(char *str) 2928 { 2929 for (; *str; ++str) { 2930 if (strncmp(str, "legacy", 6) == 0) { 2931 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2932 break; 2933 } 2934 if (strncmp(str, "vapic", 5) == 0) { 2935 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 2936 break; 2937 } 2938 } 2939 return 1; 2940 } 2941 2942 static int __init parse_amd_iommu_options(char *str) 2943 { 2944 for (; *str; ++str) { 2945 if (strncmp(str, "fullflush", 9) == 0) 2946 amd_iommu_unmap_flush = true; 2947 if (strncmp(str, "off", 3) == 0) 2948 amd_iommu_disabled = true; 2949 if (strncmp(str, "force_isolation", 15) == 0) 2950 amd_iommu_force_isolation = true; 2951 } 2952 2953 return 1; 2954 } 2955 2956 static int __init parse_ivrs_ioapic(char *str) 2957 { 2958 unsigned int bus, dev, fn; 2959 int ret, id, i; 2960 u16 devid; 2961 2962 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 2963 2964 if (ret != 4) { 2965 pr_err("Invalid command line: ivrs_ioapic%s\n", str); 2966 return 1; 2967 } 2968 2969 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 2970 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 2971 str); 2972 return 1; 2973 } 2974 2975 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 2976 2977 cmdline_maps = true; 2978 i = early_ioapic_map_size++; 2979 early_ioapic_map[i].id = id; 2980 early_ioapic_map[i].devid = devid; 2981 early_ioapic_map[i].cmd_line = true; 2982 2983 return 1; 2984 } 2985 2986 static int __init parse_ivrs_hpet(char *str) 2987 { 2988 unsigned int bus, dev, fn; 2989 int ret, id, i; 2990 u16 devid; 2991 2992 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 2993 2994 if (ret != 4) { 2995 pr_err("Invalid command line: ivrs_hpet%s\n", str); 2996 return 1; 2997 } 2998 2999 if (early_hpet_map_size == EARLY_MAP_SIZE) { 3000 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", 3001 str); 3002 return 1; 3003 } 3004 3005 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 3006 3007 cmdline_maps = true; 3008 i = early_hpet_map_size++; 3009 early_hpet_map[i].id = id; 3010 early_hpet_map[i].devid = devid; 3011 early_hpet_map[i].cmd_line = true; 3012 3013 return 1; 3014 } 3015 3016 static int __init parse_ivrs_acpihid(char *str) 3017 { 3018 u32 bus, dev, fn; 3019 char *hid, *uid, *p; 3020 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; 3021 int ret, i; 3022 3023 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); 3024 if (ret != 4) { 3025 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); 3026 return 1; 
3027 } 3028 3029 p = acpiid; 3030 hid = strsep(&p, ":"); 3031 uid = p; 3032 3033 if (!hid || !(*hid) || !uid) { 3034 pr_err("Invalid command line: hid or uid\n"); 3035 return 1; 3036 } 3037 3038 i = early_acpihid_map_size++; 3039 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 3040 memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); 3041 early_acpihid_map[i].devid = 3042 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 3043 early_acpihid_map[i].cmd_line = true; 3044 3045 return 1; 3046 } 3047 3048 __setup("amd_iommu_dump", parse_amd_iommu_dump); 3049 __setup("amd_iommu=", parse_amd_iommu_options); 3050 __setup("amd_iommu_intr=", parse_amd_iommu_intr); 3051 __setup("ivrs_ioapic", parse_ivrs_ioapic); 3052 __setup("ivrs_hpet", parse_ivrs_hpet); 3053 __setup("ivrs_acpihid", parse_ivrs_acpihid); 3054 3055 IOMMU_INIT_FINISH(amd_iommu_detect, 3056 gart_iommu_hole_init, 3057 NULL, 3058 NULL); 3059 3060 bool amd_iommu_v2_supported(void) 3061 { 3062 return amd_iommu_v2_present; 3063 } 3064 EXPORT_SYMBOL(amd_iommu_v2_supported); 3065 3066 struct amd_iommu *get_amd_iommu(unsigned int idx) 3067 { 3068 unsigned int i = 0; 3069 struct amd_iommu *iommu; 3070 3071 for_each_iommu(iommu) 3072 if (i++ == idx) 3073 return iommu; 3074 return NULL; 3075 } 3076 EXPORT_SYMBOL(get_amd_iommu); 3077 3078 /**************************************************************************** 3079 * 3080 * IOMMU EFR Performance Counter support functionality. This code allows 3081 * access to the IOMMU PC functionality. 3082 * 3083 ****************************************************************************/ 3084 3085 u8 amd_iommu_pc_get_max_banks(unsigned int idx) 3086 { 3087 struct amd_iommu *iommu = get_amd_iommu(idx); 3088 3089 if (iommu) 3090 return iommu->max_banks; 3091 3092 return 0; 3093 } 3094 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); 3095 3096 bool amd_iommu_pc_supported(void) 3097 { 3098 return amd_iommu_pc_present; 3099 } 3100 EXPORT_SYMBOL(amd_iommu_pc_supported); 3101 3102 u8 amd_iommu_pc_get_max_counters(unsigned int idx) 3103 { 3104 struct amd_iommu *iommu = get_amd_iommu(idx); 3105 3106 if (iommu) 3107 return iommu->max_counters; 3108 3109 return 0; 3110 } 3111 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 3112 3113 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, 3114 u8 fxn, u64 *value, bool is_write) 3115 { 3116 u32 offset; 3117 u32 max_offset_lim; 3118 3119 /* Make sure the IOMMU PC resource is available */ 3120 if (!amd_iommu_pc_present) 3121 return -ENODEV; 3122 3123 /* Check for valid iommu and pc register indexing */ 3124 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) 3125 return -ENODEV; 3126 3127 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); 3128 3129 /* Limit the offset to the hw defined mmio region aperture */ 3130 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | 3131 (iommu->max_counters << 8) | 0x28); 3132 if ((offset < MMIO_CNTR_REG_OFFSET) || 3133 (offset > max_offset_lim)) 3134 return -EINVAL; 3135 3136 if (is_write) { 3137 u64 val = *value & GENMASK_ULL(47, 0); 3138 3139 writel((u32)val, iommu->mmio_base + offset); 3140 writel((val >> 32), iommu->mmio_base + offset + 4); 3141 } else { 3142 *value = readl(iommu->mmio_base + offset + 4); 3143 *value <<= 32; 3144 *value |= readl(iommu->mmio_base + offset); 3145 *value &= GENMASK_ULL(47, 0); 3146 } 3147 3148 return 0; 3149 } 3150 3151 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3152 { 3153 if (!iommu) 3154 return -EINVAL; 3155 3156 return 
iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); 3157 } 3158 EXPORT_SYMBOL(amd_iommu_pc_get_reg); 3159 3160 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3161 { 3162 if (!iommu) 3163 return -EINVAL; 3164 3165 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); 3166 } 3167 EXPORT_SYMBOL(amd_iommu_pc_set_reg); 3168
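
/*
 * Illustrative sketch (not part of the driver): one way a caller might
 * use the performance-counter API above. The fxn offsets 0x0 and 0x8
 * satisfy the 8-byte-aligned, <= 0x28 constraint enforced by
 * iommu_pc_get_set_reg(); their roles (counter value and counter
 * source) are assumed from the PC MMIO layout, and the event-source
 * value programmed below is hypothetical.
 */
#if 0	/* example only, not compiled */
static int amd_iommu_pc_example(void)
{
	struct amd_iommu *iommu = get_amd_iommu(0);
	u64 source = 0x14;	/* hypothetical event-source selector */
	u64 count;
	int ret;

	if (!iommu || !amd_iommu_pc_supported())
		return -ENODEV;

	/* program bank 0, counter 0 via the source-select register */
	ret = amd_iommu_pc_set_reg(iommu, 0, 0, 0x8, &source);
	if (ret)
		return ret;

	/* read back the 48-bit counter value */
	ret = amd_iommu_pc_get_reg(iommu, 0, 0, 0x0, &count);
	if (ret)
		return ret;

	pr_info("IOMMU PC bank 0, counter 0: %llu\n", count);
	return 0;
}
#endif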