/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
#include <linux/io-pgtable.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS	32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)	(((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)	(((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x)	(((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)
#define FEATURE_SNP		(1ULL<<63)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Current hardware also only implements up to 16-bit PASIDs
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff
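/*
 * The PASmax field selected by FEATURE_PASID_MASK encodes the supported
 * PASID width: the maximum PASID value is (1 << (PASmax + 1)) - 1, so e.g.
 * a field value of 15 corresponds to 0xffff, i.e. the 16-bit PASID_MASK
 * above.
 */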
/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_TYPE_RMP_FAULT	0xd
#define EVENT_TYPE_RMP_HW_ERR	0xe
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPRLOG_EN	0x0dULL
#define CONTROL_PPRINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL
#define CONTROL_INTCAPXT_EN	0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)
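/*
 * The size field written into the command buffer base register encodes the
 * number of entries as a power of two: 0x9 selects 2^9 = 512 entries of
 * 16 bytes each, i.e. the 8 KiB CMD_BUFFER_SIZE above.
 */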
/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1

#define IOMMU_IN_ADDR_BIT_SIZE	52
#define IOMMU_OUT_ADDR_BIT_SIZE	52

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB pages are not supported due to a hardware bug.
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))

/* Bit value definition for dte irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE	1ULL

/*
 * AMD IOMMU hardware only supports 512 IRTEs despite
 * the architectural limit of 2048 entries.
 */
#define DTE_INTTAB_ALIGNMENT	128
#define DTE_INTTABLEN_VALUE	9ULL
#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK	(0xfULL << 1)
#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06
#define PAGE_MODE_7_LEVEL	0x07

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))
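/*
 * Example: for a 32 KiB page (pagesize = 0x8000), __ffs() returns 15, so
 * PAGE_SIZE_LEVEL() yields (15 - 12) / 9 = 0 and PAGE_SIZE_PTE_COUNT()
 * yields 1 << ((15 - 12) % 9) = 8, i.e. the mapping occupies 8 consecutive
 * level-0 PTEs.
 */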
/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

/*
 * Takes a page-table level and returns the default page-size for this level
 */
#define PTE_LEVEL_PAGE_SIZE(level)	\
	(1ULL << (12 + (9 * (level))))

/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL
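/*
 * The guest CR3 table root pointer does not fit into a single DTE field:
 * DTE_GCR3_VAL_A/B/C extract address bits 14:12, 30:15 and 51:31, which are
 * then placed into DTE words 0, 1 and 1 at shifts 58, 16 and 43 respectively.
 */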
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte)	(iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK	0x03
#define IOMMU_PROT_IR	0x01
#define IOMMU_PROT_IW	0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB		24
#define IOMMU_CAP_NPCACHE	26
#define IOMMU_CAP_EFR		27

/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET	36
#define IOMMU_IVINFO_EFRSUP	BIT(0)

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID	65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;	/* IO virtual address of the fault */
	u32 pasid;	/* Address space identifier */
	u16 device_id;	/* Originating PCI device id */
	u16 tag;	/* PPR tag */
	u16 flags;	/* Fault flags */

};


struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)

#define io_pgtable_to_data(x) \
	container_of((x), struct amd_io_pgtable, iop)

#define io_pgtable_ops_to_data(x) \
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

#define io_pgtable_ops_to_domain(x) \
	container_of(io_pgtable_ops_to_data(x), \
		     struct protection_domain, iop)

#define io_pgtable_cfg_to_data(x) \
	container_of((x), struct amd_io_pgtable, pgtbl_cfg)

struct amd_io_pgtable {
	struct io_pgtable_cfg	pgtbl_cfg;
	struct io_pgtable	iop;
	int			mode;
	u64			*root;
	atomic64_t		pt_root;	/* pgtable root and pgtable mode */
};
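/*
 * pt_root packs the page table root pointer and the paging mode into a
 * single 64-bit value so that both can be read and updated atomically.
 */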
/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list;  /* List of all devices in this domain */
	struct iommu_domain domain; /* generic domain handle used by
				       iommu core code */
	struct amd_io_pgtable iop;
	spinlock_t lock;	/* mostly used to lock the page table */
	u16 id;			/* the domain id written to the device table */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if set, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 *cmd_sem;
	u64 cmd_sem_val;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
	/* IRQ notifier for IntCapXT interrupt */
	struct irq_affinity_notify intcapxt_notify;
};
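/* Look up the amd_iommu behind the iommu_device registered for @dev */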
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}

#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};
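/* Each device table entry is 256 bits wide, matching DEV_TABLE_ENTRY_SIZE */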
/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (including) */
	u16 devid_start;
	/* end device id this entry is used for (including) */
	u16 devid_end;

	/* start address to unity map (including) */
	u64 address_start;
	/* end address to unity map (including) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table mapping requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting
	 * only legacy interrupt remapping via 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)	(x & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)	((x >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};
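/*
 * In the 128-bit IRTE format the destination APIC ID is split across the two
 * halves: bits 23:0 live in irte_ga_lo.destination and bits 31:24 in
 * irte_ga_hi.destination, which is what APICID_TO_IRTE_DEST_LO/HI extract.
 */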
struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;	/* Pointer to union irte or struct irte_ga */
	void *ref;	/* Pointer to the actual irte */

	/**
	 * Store information for activating/de-activating
	 * guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	int ga_root_ptr;
	int ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */