/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS		32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
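/*
 * Illustrative sketch (not part of the driver API): pulling the encoded
 * PASID and GLXVAL fields out of a raw Extended Feature Register value
 * with the masks above.  Function names are hypothetical.
 */
static inline u64 example_efr_pasid_field(u64 features)
{
	/* raw PASID capability field, see FEATURE_PASID_* above */
	return (features & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT;
}

static inline u64 example_efr_glxval(u64 features)
{
	/* raw GLXVAL field (guest CR3 table levels), see FEATURE_GLXVAL_* */
	return (features & FEATURE_GLXVAL_MASK) >> FEATURE_GLXVAL_SHIFT;
}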
/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Currently, hardware only implements up to 16-bit PASIDs
 * even though the spec says it could have up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
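/*
 * Illustrative sketch (not part of the driver API): decoding one 16-byte
 * event log entry viewed as four 32-bit words.  This assumes, as the
 * shift/mask constants above suggest, that the device id lives in word 0
 * and the type and flags fields in word 1.
 */
static inline void example_decode_event(const u32 *event, u16 *devid,
					u8 *type, u16 *flags)
{
	*devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	*type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	*flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
}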
/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPRLOG_EN	0x0dULL
#define CONTROL_PPRINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL
#define CONTROL_INTCAPXT_EN	0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01
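/*
 * Illustrative sketch (not part of the driver API): extracting the fields
 * of a PPR log entry with the accessors above.  A 16-byte entry is treated
 * as two 64-bit words; this sketch assumes the first word carries
 * devid/PASID/tag/flags and the second the faulting IO virtual address.
 */
static inline void example_decode_ppr(const u64 *raw, u16 *devid,
				      u32 *pasid, u16 *tag, u16 *flags)
{
	*devid = PPR_DEVID(raw[0]);
	*pasid = PPR_PASID(raw[0]);	/* combined from the two PASID parts */
	*tag   = PPR_TAG(raw[0]);
	*flags = PPR_FLAGS(raw[0]);
	/* raw[1] would hold the faulting address for PPR_REQ_FAULT entries */
}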
/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1

/* Bit value definition for dte irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK		(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_TABLE_LEN_MASK		(0xfULL << 1)
#define DTE_IRQ_REMAP_INTCTL		(2ULL << 60)
#define DTE_IRQ_TABLE_LEN		(8ULL << 1)
#define DTE_IRQ_REMAP_ENABLE		1ULL

#define PAGE_MODE_NONE    0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define PAGE_MODE_4_LEVEL 0x04
#define PAGE_MODE_5_LEVEL 0x05
#define PAGE_MODE_6_LEVEL 0x06
#define PAGE_MODE_7_LEVEL 0x07

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				(0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

/*
 * Takes a page-table level and returns the default page-size for this level
 */
#define PTE_LEVEL_PAGE_SIZE(level)			\
	(1ULL << (12 + (9 * (level))))
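/*
 * Worked example for the page-size helpers above (illustrative only):
 *
 *   pagesize = 2M  (0x200000, __ffs = 21):
 *     PAGE_SIZE_LEVEL(2M)      = (21 - 12) / 9      = 1  -> entry at level 1
 *     PAGE_SIZE_PTE_COUNT(2M)  = 1 << ((21 - 12) % 9) = 1
 *
 *   pagesize = 32K (0x8000, __ffs = 15):
 *     PAGE_SIZE_LEVEL(32K)     = (15 - 12) / 9      = 0  -> level-0 PTEs
 *     PAGE_SIZE_PTE_COUNT(32K) = 1 << 3             = 8  -> 8 identical PTEs
 *
 *   PTE_LEVEL_PAGE_SIZE(1) = 1ULL << 21 = 2M, the default size at level 1.
 */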
/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL

#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte)	(iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK		0x03
#define IOMMU_PROT_IR		0x01
#define IOMMU_PROT_IW		0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID 65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

#define MAX_IRQS_PER_TABLE	256
#define IRQ_TABLE_ALIGNMENT	128

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */
};

struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list;  /* List of all devices in this domain */
	struct iommu_domain domain; /* generic domain handle used by
				       iommu core code */
	spinlock_t lock;	/* mostly used to lock the page table */
	u16 id;			/* the domain id written to the device table */
	atomic64_t pt_root;	/* pgtable root and pgtable mode */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};

/* For decoded pt_root */
struct domain_pgtable {
	int mode;
	u64 *root;
};
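/*
 * Illustrative sketch (an assumption of this note, not a definition from
 * this header): pt_root packs the PAGE_MODE_* value into its low bits and
 * the page-aligned root-table pointer into the remaining bits, so a
 * domain_pgtable could be filled in roughly like this:
 *
 *	u64 pt_root = atomic64_read(&domain->pt_root);
 *
 *	pgtable->mode = pt_root & 7;
 *	pgtable->root = (u64 *)(pt_root & ~0xfffULL);
 */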
/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there are more than one AMD IOMMU capability
	 * pointers.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 __aligned(8) cmd_sem;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
	/* IRQ notifier for IntCapXT interrupt */
	struct irq_affinity_notify intcapxt_notify;
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}
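/*
 * Illustrative sketch (not part of the driver API): MMIO registers are
 * accessed relative to mmio_base using the MMIO_*_OFFSET constants defined
 * earlier, e.g. (assuming <linux/io.h> is available in the .c file):
 *
 *	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 *
 *	if (status & MMIO_STATUS_EVT_INT_MASK)
 *		;	// an event log interrupt is pending
 */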
#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (including) */
	u16 devid_start;
	/* end device id this entry is used for (including) */
	u16 devid_end;

	/* start address to unity map (including) */
	u64 address_start;
	/* end address to unity map (including) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;
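/*
 * Illustrative sketch (not part of the driver API): the reverse lookup
 * table is indexed by PCI device id to find the IOMMU responsible for a
 * device.  The function name is hypothetical.
 */
static inline struct amd_iommu *example_devid_to_iommu(u16 devid)
{
	/* callers are expected to keep devid <= amd_iommu_last_bdf */
	return amd_iommu_rlookup_table[devid];
}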
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed on unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to only supporting
	 * legacy interrupt remapping via 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)	(x & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)	((x >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};

struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;    /* Pointer to union irte or struct irte_ga */
	void *ref;      /* Pointer to the actual irte */

	/**
	 * Store information for activate/de-activate
	 * Guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	int ga_root_ptr;
	int ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, u32, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
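/*
 * Illustrative note (not part of the driver API): in the 128-bit guest
 * aware IRTE format the destination APIC id is split across the two
 * halves, which is what APICID_TO_IRTE_DEST_LO/HI are for:
 *
 *	struct irte_ga irte = { };
 *
 *	irte.lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(apicid);
 *	irte.hi.fields.destination       = APICID_TO_IRTE_DEST_HI(apicid);
 */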