/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS		32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)	(((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)	(((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x)	(((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)
#define FEATURE_SNP		(1ULL<<63)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
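/*
 * Illustrative note: the PASID-size field selected by FEATURE_PASID_MASK is
 * five bits wide; a raw value of n means PASIDs up to (1 << (n + 1)) - 1 can
 * be supported, so the limit is typically derived from the extended feature
 * register as:
 *
 *	pasmax    = (efr & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT;
 *	max_pasid = (1 << (pasmax + 1)) - 1;
 */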
/*
 * Note:
 * The current driver only supports 16-bit PASIDs.
 * Currently, hardware only implements up to 16-bit PASIDs
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_TYPE_RMP_FAULT	0xd
#define EVENT_TYPE_RMP_HW_ERR	0xe
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
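/*
 * Illustrative sketch: an event log entry is EVENT_ENTRY_SIZE (16) bytes,
 * i.e. four 32-bit words; given the words of one entry in event[], the
 * type, device id and flags fields are typically extracted like this:
 *
 *	type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
 *	devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 *	flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 */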
/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPRLOG_EN	0x0dULL
#define CONTROL_PPRINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL
#define CONTROL_INTCAPXT_EN	0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK		0xf
#define PPR_STATUS_SHIFT	12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1

/* Bit value definition for dte irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK		(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL		(2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE		1ULL

/*
 * AMD IOMMU hardware only supports 512 IRTEs despite
 * the architectural limit of 2048 entries.
 */
#define DTE_INTTAB_ALIGNMENT	128
#define DTE_INTTABLEN_VALUE	9ULL
#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK	(0xfULL << 1)
#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06
#define PAGE_MODE_7_LEVEL	0x07

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				(0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
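/*
 * Illustrative example: each page-table level covers a 9-bit slice of the
 * IO virtual address above the 12-bit page offset, so for an address iova:
 *
 *	PM_LEVEL_INDEX(0, iova) == (iova >> 12) & 0x1ff
 *	PM_LEVEL_INDEX(1, iova) == (iova >> 21) & 0x1ff
 *	PM_LEVEL_INDEX(2, iova) == (iova >> 30) & 0x1ff
 *
 * and PM_LEVEL_SIZE(x) is the highest IO virtual address translatable with
 * page mode x.
 */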
#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

/*
 * Takes a page-table level and returns the default page-size for this level
 */
#define PTE_LEVEL_PAGE_SIZE(level)	\
	(1ULL << (12 + (9 * (level))))
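/*
 * Worked example (illustrative): for a 2 MiB mapping, __ffs(0x200000) is 21,
 * so:
 *
 *	PAGE_SIZE_LEVEL(0x200000)     == (21 - 12) / 9        == 1
 *	PAGE_SIZE_PTE_COUNT(0x200000) == 1 << ((21 - 12) % 9) == 1
 *
 * while an odd power-of-two size such as 32 KiB stays at level 0 but spans
 * several PTEs:
 *
 *	PAGE_SIZE_LEVEL(0x8000)     == (15 - 12) / 9        == 0
 *	PAGE_SIZE_PTE_COUNT(0x8000) == 1 << ((15 - 12) % 9) == 8
 *
 * PAGE_SIZE_PTE() encodes the size by setting the address bits below the
 * page size to one, except for the bit directly below the page size, which
 * PTE_PAGE_SIZE() locates again with ffz().
 */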
/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL

#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte)	(iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK		0x03
#define IOMMU_PROT_IR		0x01
#define IOMMU_PROT_IW		0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB		24
#define IOMMU_CAP_NPCACHE	26
#define IOMMU_CAP_EFR		27

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID		65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;	/* IO virtual address of the fault */
	u32 pasid;	/* Address space identifier */
	u16 device_id;	/* Originating PCI device id */
	u16 tag;	/* PPR tag */
	u16 flags;	/* Fault flags */
};


struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list;	/* List of all devices in this domain */
	struct iommu_domain domain;	/* generic domain handle used by
					   iommu core code */
	spinlock_t lock;		/* mostly used to lock the page table */
	u16 id;				/* the domain id written to the device table */
	atomic64_t pt_root;		/* pgtable root and pgtable mode */
	int glx;			/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;			/* Guest CR3 table */
	unsigned long flags;		/* flags to find out type of domain */
	unsigned dev_cnt;		/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS];	/* per-IOMMU reference count */
};

/* For decoded pt_root */
struct domain_pgtable {
	int mode;
	u64 *root;
};
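/*
 * Note (illustrative): pt_root packs both members of struct domain_pgtable
 * into a single 64-bit value so that root pointer and mode can be read and
 * updated atomically; the page-aligned root address sits in the upper bits
 * and the page-table mode (PAGE_MODE_*, at most 3 bits) in the low bits,
 * roughly:
 *
 *	pgtable->root = (u64 *)(pt_root & PAGE_MASK);
 *	pgtable->mode = pt_root & 7;
 */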
/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if set, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 *cmd_sem;
	u64 cmd_sem_val;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
	/* IRQ notifier for IntCapXT interrupt */
	struct irq_affinity_notify intcapxt_notify;
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}

#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};
/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	u32 errata;			  /* Bitmap for errata to apply */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is
 * only read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;
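/*
 * Illustrative example: the three tables above are indexed by the 16-bit
 * PCI device id, e.g. for a device with id devid:
 *
 *	struct amd_iommu *iommu     = amd_iommu_rlookup_table[devid];
 *	u16 alias                   = amd_iommu_alias_table[devid];
 *	struct dev_table_entry *dte = &amd_iommu_dev_table[devid];
 */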
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting
	 * only legacy interrupt remapping via 128-bit IRTEs.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)	(x & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)	((x >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};

struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;	/* Pointer to union irte or struct irte_ga */
	void *ref;	/* Pointer to the actual irte */

	/*
	 * Store information for activate/de-activate
	 * Guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	int ga_root_ptr;
	int ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */