/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
#include <linux/io-pgtable.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS	32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)
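/*
 * Illustrative note (not part of the original header): the device range
 * capability register, read from PCI config space at cap_ptr +
 * MMIO_RANGE_OFFSET during init, can be decoded with the helpers above;
 * a rough sketch (the exact config-space read is an assumption here):
 *
 *	u32 range;	read via pci_read_config_dword()
 *	u8 bus       = MMIO_GET_BUS(range);
 *	u8 first_dev = MMIO_GET_FD(range);
 *	u8 last_dev  = MMIO_GET_LD(range);
 *
 * See the init code for the authoritative usage.
 */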
/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_GA_LOG_BASE_OFFSET	0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET	0x00e8
#define MMIO_MSI_ADDR_LO_OFFSET	0x015C
#define MMIO_MSI_ADDR_HI_OFFSET	0x0160
#define MMIO_MSI_DATA_OFFSET	0x0164
#define MMIO_INTCAPXT_EVT_OFFSET	0x0170
#define MMIO_INTCAPXT_PPR_OFFSET	0x0178
#define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_GA_HEAD_OFFSET	0x2040
#define MMIO_GA_TAIL_OFFSET	0x2048
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)
#define FEATURE_GAM_VAPIC	(1ULL<<21)
#define FEATURE_EPHSUP		(1ULL<<50)
#define FEATURE_SNP		(1ULL<<63)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Current hardware also implements at most 16-bit PASIDs,
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK	(1 << 0)
#define MMIO_STATUS_EVT_INT_MASK		(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK		(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK		(1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK		(1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK		(1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK		(1 << 10)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_TYPE_INV_PPR_REQ	0x9
#define EVENT_TYPE_RMP_FAULT	0xd
#define EVENT_TYPE_RMP_HW_ERR	0xe
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK_LO	0xffff
#define EVENT_DOMID_MASK_HI	0xf0000
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
#define EVENT_FLAG_RW		0x020
#define EVENT_FLAG_I		0x008

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPRLOG_EN	0x0dULL
#define CONTROL_PPRINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL
#define CONTROL_GA_EN		0x11ULL
#define CONTROL_GAM_EN		0x19ULL
#define CONTROL_GALOG_EN	0x1CULL
#define CONTROL_GAINT_EN	0x1DULL
#define CONTROL_XT_EN		0x32ULL
#define CONTROL_INTCAPXT_EN	0x33ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6
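/*
 * Illustrative note (not part of the original header): the CONTROL_*
 * values above are bit positions inside the 64-bit control register at
 * MMIO_CONTROL_OFFSET, so enabling a feature is a read-modify-write of
 * that register; roughly (assuming readq/writeq access):
 *
 *	u64 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
 *	ctrl |= (1ULL << CONTROL_EVT_LOG_EN);
 *	writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 *
 * The init code wraps this pattern in small enable/disable helpers.
 */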
/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_PPR		0x34
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED	1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

/* Constants for GA Log handling */
#define GA_LOG_ENTRIES		512
#define GA_LOG_SIZE_SHIFT	56
#define GA_LOG_SIZE_512		(0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE		8
#define GA_LOG_SIZE		(GA_ENTRY_SIZE * GA_LOG_ENTRIES)

#define GA_TAG(x)		(u32)(x & 0xffffffffULL)
#define GA_DEVID(x)		(u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)

#define GA_GUEST_NR		0x1

#define IOMMU_IN_ADDR_BIT_SIZE  52
#define IOMMU_OUT_ADDR_BIT_SIZE 52

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB Pages are not supported due to a hardware bug
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
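/*
 * Illustrative note (not part of the original header): with the mask
 * above, every power-of-two size from 4K upwards is advertised except
 * 512GB, e.g.:
 *
 *	bit 12 (4K), bit 21 (2M), bit 30 (1G)	-> set
 *	bit 39 (512G, i.e. 2ULL << 38)		-> cleared
 *
 * Intermediate sizes (8K, 16K, ...) can be advertised as well because
 * the page-table code encodes arbitrary power-of-two sizes via the
 * mode-7 PTE format (see PAGE_SIZE_PTE() below).
 */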
/* Bit value definition for dte irq remapping fields */
#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE	1ULL

/*
 * AMD IOMMU hardware only supports 512 IRTEs despite
 * the architectural limit of 2048 entries.
 */
#define DTE_INTTAB_ALIGNMENT	128
#define DTE_INTTABLEN_VALUE	9ULL
#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK	(0xfULL << 1)
#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)

#define PAGE_MODE_NONE    0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define PAGE_MODE_4_LEVEL 0x04
#define PAGE_MODE_5_LEVEL 0x05
#define PAGE_MODE_6_LEVEL 0x06
#define PAGE_MODE_7_LEVEL 0x07

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1) : \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~((pagesize) >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

/*
 * Takes a page-table level and returns the default page-size for this level
 */
#define PTE_LEVEL_PAGE_SIZE(level)			\
	(1ULL << (12 + (9 * (level))))
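/*
 * Illustrative note (not part of the original header): PM_LEVEL_SHIFT()
 * gives the address shift covered by a page-table level, e.g. level 0
 * -> 12 (4K entries), level 1 -> 21 (2M), level 2 -> 30 (1G).  A 2M
 * mapping at IOVA 0x200000 works through the macros above roughly as:
 *
 *	PAGE_SIZE_LEVEL(0x200000)          == 1
 *	PAGE_SIZE_PTE_COUNT(0x200000)      == 1
 *	PAGE_SIZE_PTE(0x200000, 0x200000)  == 0x2ff000
 *	    (offset bits 12..19 set, bit 20 == pagesize/2 cleared)
 *	PTE_PAGE_SIZE(0x2ff000)            == 0x200000
 *
 * i.e. the run of 1-bits below the first 0-bit of a mode-7 PTE encodes
 * the page size it maps.
 */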
/*
 * Bit value definition for I/O PTE fields
 */
#define IOMMU_PTE_PR	(1ULL << 0)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

/*
 * Bit value definition for DTE fields
 */
#define DTE_FLAG_V	(1ULL << 0)
#define DTE_FLAG_TV	(1ULL << 1)
#define DTE_FLAG_IR	(1ULL << 61)
#define DTE_FLAG_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(1ULL << 32)
#define DTE_FLAG_GV	(1ULL << 55)
#define DTE_FLAG_MASK	(0x3ffULL << 32)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)
#define DEV_DOMID_MASK	0xffffULL

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL
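/*
 * Illustrative note (not part of the original header): the physical
 * address of the guest CR3 table is scattered across the DTE.  For a
 * gcr3 root at physical address 'gcr3', DTE setup would, roughly
 * (a sketch, not the exact driver code):
 *
 *	dte[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
 *	dte[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
 *	dte[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
 *
 * i.e. address bits 14:12, 30:15 and 51:31 land in three separate DTE
 * fields; the surrounding flag handling lives in the driver proper.
 */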
#define IOMMU_PAGE_MASK    (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02

#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE	(1 << 2)

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR     27

/* IOMMU IVINFO */
#define IOMMU_IVINFO_OFFSET     36
#define IOMMU_IVINFO_EFRSUP     BIT(0)

/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT	6

/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT	2
#define IOMMU_EFR_GASUP_SHIFT	7
#define IOMMU_EFR_MSICAPMMIOSUP_SHIFT	46

#define MAX_DOMAIN_ID 65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)				\
	do {							\
		if (amd_iommu_dump)				\
			pr_info("AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */
};


struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;

#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)

#define io_pgtable_to_data(x) \
	container_of((x), struct amd_io_pgtable, iop)

#define io_pgtable_ops_to_data(x) \
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

#define io_pgtable_ops_to_domain(x) \
	container_of(io_pgtable_ops_to_data(x), \
		     struct protection_domain, iop)

#define io_pgtable_cfg_to_data(x) \
	container_of((x), struct amd_io_pgtable, pgtbl_cfg)

struct amd_io_pgtable {
	struct io_pgtable_cfg	pgtbl_cfg;
	struct io_pgtable	iop;
	int			mode;
	u64			*root;
	atomic64_t		pt_root;	/* pgtable root and pgtable mode */
};
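/*
 * Illustrative note (not part of the original header): 'pt_root' packs
 * the page-table root pointer and the paging mode into one 64-bit value
 * so both can be read atomically; the root is page aligned, so the low
 * bits are free to carry the mode.  Conceptually (a sketch, not the
 * exact driver code):
 *
 *	u64 pt_root = atomic64_read(&pgtable->pt_root);
 *	int mode    = pt_root & 7;		(low bits: PAGE_MODE_*)
 *	u64 *root   = (u64 *)(pt_root & PAGE_MASK);
 */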
/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head dev_list;	/* List of all devices in this domain */
	struct iommu_domain domain;	/* generic domain handle used by
					   iommu core code */
	struct amd_io_pgtable iop;
	spinlock_t lock;	/* mostly used to lock the page table */
	u16 id;			/* the domain id written to the device table */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	raw_spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	u32 cmd_buf_head;
	u32 cmd_buf_tail;

	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* Base of the GA log, if present */
	u8 *ga_log;

	/* Tail of the GA log, if present */
	u8 *ga_log_tail;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* Handle for IOMMU core code */
	struct iommu_device iommu;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
#ifdef CONFIG_IRQ_REMAP
	struct irq_domain *ir_domain;
	struct irq_domain *msi_domain;

	struct amd_irte_ops *irte_ops;
#endif

	u32 flags;
	volatile u64 *cmd_sem;
	u64 cmd_sem_val;

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
	/* DebugFS Info */
	struct dentry *debugfs;
#endif
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
	struct iommu_device *iommu = dev_to_iommu_device(dev);

	return container_of(iommu, struct amd_iommu, iommu);
}
#define ACPIHID_UID_LEN 256
#define ACPIHID_HID_LEN 9

struct acpihid_map_entry {
	struct list_head list;
	u8 uid[ACPIHID_UID_LEN];
	u8 hid[ACPIHID_HID_LEN];
	u16 devid;
	u16 root_devid;
	bool cmd_line;
	struct iommu_group *group;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
	/* Protect against attach/detach races */
	spinlock_t lock;

	struct list_head list;		  /* For domain->dev_list */
	struct llist_node dev_data_list;  /* For global dev_data_list */
	struct protection_domain *domain; /* Domain the device is bound to */
	struct pci_dev *pdev;
	u16 devid;			  /* PCI Device ID */
	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
	struct {
		bool enabled;
		int qdep;
	} ats;				  /* ATS state */
	bool pri_tlp;			  /* PASID TLB required for
					     PPR completions */
	bool use_vapic;			  /* Enable device to use vapic mode */
	bool defer_attach;

	struct ratelimit_state rs;	  /* Ratelimit IOPF messages */
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
extern struct list_head acpihid_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (including) */
	u16 devid_start;
	/* end device id this entry is used for (including) */
	u16 devid_end;

	/* start address to unity map (including) */
	u64 address_start;
	/* end address to unity map (including) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

enum amd_iommu_intr_mode_type {
	AMD_IOMMU_GUEST_IR_LEGACY,

	/* This mode is not visible to users. It is used when
	 * we cannot fully enable vAPIC and fall back to supporting
	 * only legacy interrupt remapping via 128-bit IRTE.
	 */
	AMD_IOMMU_GUEST_IR_LEGACY_GA,
	AMD_IOMMU_GUEST_IR_VAPIC,
};

#define AMD_IOMMU_GUEST_IR_GA(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC || \
					 x == AMD_IOMMU_GUEST_IR_LEGACY_GA)

#define AMD_IOMMU_GUEST_IR_VAPIC(x)	(x == AMD_IOMMU_GUEST_IR_VAPIC)

union irte {
	u32 val;
	struct {
		u32 valid	: 1,
		    no_fault	: 1,
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    rsvd_1	: 1,
		    destination	: 8,
		    vector	: 8,
		    rsvd_2	: 8;
	} fields;
};

#define APICID_TO_IRTE_DEST_LO(x)	(x & 0xffffff)
#define APICID_TO_IRTE_DEST_HI(x)	((x >> 24) & 0xff)

union irte_ga_lo {
	u64 val;

	/* For int remapping */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    int_type	: 3,
		    rq_eoi	: 1,
		    dm		: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_remap;

	/* For guest vAPIC */
	struct {
		u64 valid	: 1,
		    no_fault	: 1,
		    /* ------ */
		    ga_log_intr	: 1,
		    rsvd1	: 3,
		    is_run	: 1,
		    /* ------ */
		    guest_mode	: 1,
		    destination	: 24,
		    ga_tag	: 32;
	} fields_vapic;
};

union irte_ga_hi {
	u64 val;
	struct {
		u64 vector	: 8,
		    rsvd_1	: 4,
		    ga_root_ptr	: 40,
		    rsvd_2	: 4,
		    destination	: 8;
	} fields;
};

struct irte_ga {
	union irte_ga_lo lo;
	union irte_ga_hi hi;
};
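/*
 * Illustrative note (not part of the original header): with x2APIC the
 * destination APIC ID is wider than the 8 bits a legacy IRTE provides,
 * so the 128-bit GA IRTE splits it across its two halves.  An affinity
 * update would, roughly (a sketch with placeholder names 'cpu_apicid'
 * and 'vector', not the exact driver code), do:
 *
 *	struct irte_ga *irte = ...;
 *	irte->lo.fields_remap.destination =
 *				APICID_TO_IRTE_DEST_LO(cpu_apicid);
 *	irte->hi.fields.destination =
 *				APICID_TO_IRTE_DEST_HI(cpu_apicid);
 *	irte->hi.fields.vector = vector;
 */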
struct irq_2_irte {
	u16 devid; /* Device ID for IRTE table */
	u16 index; /* Index into IRTE table */
};

struct amd_ir_data {
	u32 cached_ga_tag;
	struct irq_2_irte irq_2_irte;
	struct msi_msg msi_entry;
	void *entry;    /* Pointer to union irte or struct irte_ga */
	void *ref;      /* Pointer to the actual irte */

	/*
	 * Store information for activating/deactivating
	 * Guest virtual APIC mode during runtime.
	 */
	struct irq_cfg *cfg;
	int ga_vector;
	int ga_root_ptr;
	int ga_tag;
};

struct amd_irte_ops {
	void (*prepare)(void *, u32, bool, u8, u32, int);
	void (*activate)(void *, u16, u16);
	void (*deactivate)(void *, u16, u16);
	void (*set_affinity)(void *, u16, u16, u8, u32);
	void *(*get)(struct irq_remap_table *, int);
	void (*set_allocated)(struct irq_remap_table *, int);
	bool (*is_allocated)(struct irq_remap_table *, int);
	void (*clear_allocated)(struct irq_remap_table *, int);
};

#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */