/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS		32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)
#define FEATURE_SNP		(1ULL<<63)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
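/*
 * Minimal sketch of how the PASID capability field above might be pulled
 * out of a raw Extended Feature Register value. The helper name is
 * hypothetical and not part of the driver; it only illustrates the
 * shift/mask pair.
 */
static inline u32 example_efr_max_pasid(u64 efr)
{
	u64 pasmax = (efr & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT;

	/* The field encodes n, where 2^(n+1) - 1 is the largest PASID */
	return (u32)((1ULL << (pasmax + 1)) - 1);
}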
/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Current hardware also implements only 16-bit PASIDs,
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_TYPE_RMP_FAULT	0xd
#define EVENT_TYPE_RMP_HW_ERR	0xe
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPRLOG_EN	0x0dULL
#define CONTROL_PPRINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL
#define CONTROL_INTCAPXT_EN	0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
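/*
 * Minimal sketch (hypothetical helpers, not used by the driver): splitting
 * the type and requestor id out of a raw 16-byte event log entry, viewed as
 * four 32-bit words, using the EVENT_* constants above.
 */
static inline u32 example_event_type(const u32 *event)
{
	return (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

static inline u16 example_event_devid(const u32 *event)
{
	return (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
}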
/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)((x) & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1

/* Bit value definition for DTE irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE	1ULL

/*
 * AMD IOMMU hardware only supports 512 IRTEs despite
 * the architectural limit of 2048 entries.
 */
#define DTE_INTTAB_ALIGNMENT	128
#define DTE_INTTABLEN_VALUE	9ULL
#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK	(0xfULL << 1)
#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)

#define PAGE_MODE_NONE    0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define PAGE_MODE_4_LEVEL 0x04
#define PAGE_MODE_5_LEVEL 0x05
#define PAGE_MODE_6_LEVEL 0x06
#define PAGE_MODE_7_LEVEL 0x07
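/*
 * Minimal sketch (hypothetical helper): a GA log entry is a single 64-bit
 * value; the GA_* accessors above split it into request type, requestor id
 * and guest tag.
 */
static inline u64 example_decode_ga_entry(u64 entry, u16 *devid, u32 *ga_tag)
{
	*devid  = GA_DEVID(entry);
	*ga_tag = GA_TAG(entry);

	return GA_REQ_TYPE(entry);	/* GA_GUEST_NR for guest vAPIC requests */
}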
#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~((pagesize) >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

/*
 * Takes a page-table level and returns the default page-size for this level
 */
#define PTE_LEVEL_PAGE_SIZE(level)	\
	(1ULL << (12 + (9 * (level))))

/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL

#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte)	(iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK	0x03
#define IOMMU_PROT_IR	0x01
#define IOMMU_PROT_IW	0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27
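/*
 * Minimal sketch (hypothetical helper): how the PAGE_SIZE_* and
 * PM_LEVEL_ENC() helpers defined earlier in this file cooperate for a
 * non-default page size. For a 64 KiB mapping, PAGE_SIZE_LEVEL(0x10000)
 * is 0, PAGE_SIZE_PTE_COUNT(0x10000) is 16, and PAGE_SIZE_PTE() folds the
 * size into address bits [14:12] of each of those PTEs, which are then
 * marked with the special "level 7" encoding.
 */
static inline u64 example_large_page_pte(u64 paddr, unsigned long pgsize)
{
	return PAGE_SIZE_PTE(paddr, pgsize) | PM_LEVEL_ENC(7) |
	       IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW;
}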
/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET	36
#define IOMMU_IVINFO_EFRSUP	BIT(0)

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID 65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128-byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */
};


struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)
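/*
 * Minimal sketch (hypothetical helper): how the PPR_* accessors defined
 * earlier in this file map the two raw qwords of a PPR log entry onto
 * struct amd_iommu_fault. The driver's real PPR polling code lives in the
 * .c files.
 */
static inline void example_fill_ppr_fault(struct amd_iommu_fault *fault,
					  const u64 raw[2])
{
	fault->address   = raw[1];
	fault->pasid     = PPR_PASID(raw[0]);	/* PASID2:PASID1 recombined */
	fault->device_id = PPR_DEVID(raw[0]);
	fault->tag       = PPR_TAG(raw[0]);
	fault->flags     = PPR_FLAGS(raw[0]);
}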
/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list;	/* List of all devices in this domain */
	struct iommu_domain domain;	/* generic domain handle used by
					   iommu core code */
	spinlock_t lock;		/* mostly used to lock the page table */
	u16 id;				/* the domain id written to the device table */
	atomic64_t pt_root;		/* pgtable root and pgtable mode */
	int glx;			/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;			/* Guest CR3 table */
	unsigned long flags;		/* flags to find out type of domain */
	unsigned dev_cnt;		/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS];	/* per-IOMMU reference count */
};

/* For decoded pt_root */
struct domain_pgtable {
	int mode;
	u64 *root;
};
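/*
 * Minimal sketch of how the packed pt_root value decodes into struct
 * domain_pgtable: the low three bits carry the paging mode, the rest is
 * the page-table root pointer. The helper name is hypothetical; it assumes
 * the usual atomic64 and PAGE_MASK helpers are visible at this point.
 */
static inline void example_get_pgtable(struct protection_domain *domain,
				       struct domain_pgtable *pgtable)
{
	u64 pt_root = atomic64_read(&domain->pt_root);

	pgtable->root = (u64 *)(pt_root & PAGE_MASK);
	pgtable->mode = pt_root & 7;	/* lowest 3 bits encode pgtable mode */
}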
/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if set, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 *cmd_sem;
	u64 cmd_sem_val;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
	/* IRQ notifier for IntCapXT interrupt */
	struct irq_affinity_notify intcapxt_notify;
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}

#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device-specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;
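/*
 * Minimal sketch (hypothetical helper): given the sysfs device that backs an
 * IOMMU's iommu_device handle, dev_to_amd_iommu() above recovers the
 * struct amd_iommu so that, for example, an extended feature bit can be
 * tested.
 */
static inline bool example_iommu_supports_ppr(struct device *dev)
{
	return !!(dev_to_amd_iommu(dev)->features & FEATURE_PPR);
}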
/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}
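/*
 * Minimal sketch (hypothetical caller): get_ioapic_devid() and
 * get_hpet_devid() above return -EINVAL when the id is not listed in the
 * ACPI-provided maps, so callers are expected to check for a negative value
 * before using the result as a device id.
 */
static inline bool example_ioapic_is_known(int ioapic_id)
{
	return get_ioapic_devid(ioapic_id) >= 0;
}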
enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting
	 * only legacy interrupt remapping via the 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC || \
					 (x) == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	((x) == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)	((x) & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)	(((x) >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};

struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;    /* Pointer to union irte or struct irte_ga */
	void *ref;      /* Pointer to the actual irte */

	/*
	 * Store information for activating/deactivating
	 * Guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	int ga_root_ptr;
	int ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */