/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
#include <linux/io-pgtable.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS	32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)	(((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)	(((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x)	(((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)
#define FEATURE_SNP		(1ULL<<63)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

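/*
 * Illustrative sketch only (hypothetical helper name; the calculation
 * mirrors the driver's init-time handling of the PASID field): the
 * PASID field of the extended feature register encodes the number of
 * supported PASID bits minus one, so the largest PASID value is
 * (1 << (field + 1)) - 1.
 */
static inline u32 amd_iommu_example_max_pasid(u64 features)
{
	u64 pasmax = (features & FEATURE_PASID_MASK) >> FEATURE_PASID_SHIFT;

	/* use a 64-bit shift so a field value of 31 cannot overflow */
	return (u32)((1ULL << (pasmax + 1)) - 1);
}
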
/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Hardware currently implements only up to 16 PASID bits,
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK	(1 << 0)
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_TYPE_RMP_FAULT	0xd
#define EVENT_TYPE_RMP_HW_ERR	0xe
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
#define EVENT_FLAG_RW		0x020
#define EVENT_FLAG_I		0x008

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPRLOG_EN	0x0dULL
#define CONTROL_PPRINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL
#define CONTROL_INTCAPXT_EN	0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK		0xf
#define PPR_STATUS_SHIFT	12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

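/*
 * Illustrative sketch only ('amd_iommu_example_cmd' is a stand-in for
 * the driver's internal 128-bit command structure, which is not defined
 * in this header): a COMPLETION_WAIT command carries an 8-byte-aligned
 * store address in data[0]/data[1], the command type in bits 31:28 of
 * data[1], and the 64-bit value to store in data[2]/data[3].
 */
struct amd_iommu_example_cmd {
	u32 data[4];
};

static inline void
amd_iommu_example_build_compl_wait(struct amd_iommu_example_cmd *cmd, u64 paddr)
{
	cmd->data[0] = (u32)paddr | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = (u32)(paddr >> 32) | (CMD_COMPL_WAIT << 28);
	cmd->data[2] = 1;	/* value written to paddr on completion */
	cmd->data[3] = 0;
}
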
/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)((x) & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1

#define IOMMU_IN_ADDR_BIT_SIZE	52
#define IOMMU_OUT_ADDR_BIT_SIZE	52

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB pages are not supported due to a hardware bug.
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))

/* Bit value definition for dte irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE	1ULL

/*
 * AMD IOMMU hardware only supports 512 IRTEs despite
 * the architectural limit of 2048 entries.
 */
#define DTE_INTTAB_ALIGNMENT	128
#define DTE_INTTABLEN_VALUE	9ULL
#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK	(0xfULL << 1)
#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06
#define PAGE_MODE_7_LEVEL	0x07

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1) : \
				  (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

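/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): each page-table level translates 9 bits of the IO virtual
 * address; PM_LEVEL_INDEX() extracts the table index for one level,
 * so one step of a table walk is a simple array lookup.
 */
static inline u64 amd_iommu_example_read_pte(u64 *table, int level,
					     unsigned long iova)
{
	return table[PM_LEVEL_INDEX(level, iova)];
}
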
/*
 * Returns the page table level to use for a given page size.
 * Pagesize is expected to be a power-of-two.
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size.
 * Pagesize is expected to be a power-of-two.
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size.
 * Pagesize is expected to be a power-of-two.
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize.
 * The PTE has no permission bits set.
 * Pagesize is expected to be a power-of-two larger than 4096.
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~((pagesize) >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps.
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

/*
 * Takes a page-table level and returns the default page-size for this level.
 */
#define PTE_LEVEL_PAGE_SIZE(level)	\
	(1ULL << (12 + (9 * (level))))

/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL

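/*
 * Illustrative sketch only (hypothetical helper; the placement is an
 * assumption derived from the VAL/SHIFT/INDEX constants above): the
 * guest CR3 table pointer does not fit into one contiguous DTE field,
 * so it is split into three pieces (A, B, C) spread across two 64-bit
 * words of the device table entry.
 */
static inline void amd_iommu_example_encode_gcr3(u64 *dte, u64 gcr3)
{
	dte[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
	dte[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
	dte[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
}
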
#define IOMMU_PAGE_MASK	(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte)	(iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK	0x03
#define IOMMU_PROT_IR	0x01
#define IOMMU_PROT_IW	0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB		24
#define IOMMU_CAP_NPCACHE	26
#define IOMMU_CAP_EFR		27

/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET	36
#define IOMMU_IVINFO_EFRSUP	BIT(0)
#define IOMMU_IVINFO_DMA_REMAP	BIT(1)

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID	65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;	/* IO virtual address of the fault */
	u32 pasid;	/* Address space identifier */
	u16 device_id;	/* Originating PCI device id */
	u16 tag;	/* PPR tag */
	u16 flags;	/* Fault flags */
};


struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)

#define io_pgtable_to_data(x) \
	container_of((x), struct amd_io_pgtable, iop)

#define io_pgtable_ops_to_data(x) \
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

#define io_pgtable_ops_to_domain(x) \
	container_of(io_pgtable_ops_to_data(x), \
		     struct protection_domain, iop)

#define io_pgtable_cfg_to_data(x) \
	container_of((x), struct amd_io_pgtable, pgtbl_cfg)

struct amd_io_pgtable {
	struct io_pgtable_cfg	pgtbl_cfg;
	struct io_pgtable	iop;
	int			mode;
	u64			*root;
	atomic64_t		pt_root;	/* pgtable root and pgtable mode */
};

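/*
 * Illustrative sketch only (hypothetical helper; the packing is an
 * assumption based on the pt_root comment above): pt_root keeps the
 * page-table root address and the 3-bit PAGE_MODE_* value in a single
 * 64-bit word, so both can be read with one atomic load.
 */
static inline int amd_iommu_example_pt_mode(struct amd_io_pgtable *pgtable)
{
	return (int)(atomic64_read(&pgtable->pt_root) & 0x7);
}
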
/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list;	/* List of all devices in this domain */
	struct iommu_domain domain;	/* generic domain handle used by
					   iommu core code */
	struct amd_io_pgtable iop;
	spinlock_t lock;		/* mostly used to lock the page table */
	u16 id;				/* the domain id written to the device table */
	int glx;			/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;			/* Guest CR3 table */
	unsigned long flags;		/* flags to find out type of domain */
	unsigned dev_cnt;		/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS];	/* per-IOMMU reference count */
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if it implements more than one AMD IOMMU
	 * capability pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 *cmd_sem;
	u64 cmd_sem_val;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
};

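/*
 * Illustrative sketch (hypothetical name; similar in spirit to the
 * driver's own feature-check helper): test one of the FEATURE_* bits
 * against the cached extended feature register of an IOMMU.
 */
static inline bool amd_iommu_example_feature(struct amd_iommu *iommu, u64 mask)
{
	return !!(iommu->features & mask);
}
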
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}

#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time.
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct.
 * The indices are referenced in the protection domains.
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (including) */
	u16 devid_start;
	/* end device id this entry is used for (including) */
	u16 devid_end;

	/* start address to unity map (including) */
	u64 address_start;
	/* end address to unity map (including) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

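/*
 * Illustrative sketch only (hypothetical helper): requests from a
 * device may arrive under a different requestor ID, e.g. for devices
 * behind a PCIe-to-PCI bridge; the alias table maps a device ID to the
 * ID the IOMMU actually sees.
 */
static inline u16 amd_iommu_example_alias(u16 devid)
{
	return amd_iommu_alias_table[devid];
}
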
/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting only
	 * legacy interrupt remapping via 128-bit IRTEs.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)	(x & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)	((x >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};

struct irq_2_irte {
	u16 devid;	/* Device ID for IRTE table */
	u16 index;	/* Index into IRTE table */
};

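/*
 * Illustrative sketch only (hypothetical helper): in the 128-bit IRTE
 * format the destination APIC ID is split, with the low 24 bits in the
 * first half of the entry and the high 8 bits in the second half.
 */
static inline void amd_iommu_example_set_dest(struct irte_ga *irte, u32 apicid)
{
	irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(apicid);
	irte->hi.fields.destination       = APICID_TO_IRTE_DEST_HI(apicid);
}
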
struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;	/* Pointer to union irte or struct irte_ga */
	void *ref;	/* Pointer to the actual irte */

	/*
	 * Store information for activating/deactivating
	 * guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	int ga_root_ptr;
	int ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */