1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright © 2018 Intel Corporation. 4 * 5 * Authors: Gayatri Kammela <gayatri.kammela@intel.com> 6 * Sohil Mehta <sohil.mehta@intel.com> 7 * Jacob Pan <jacob.jun.pan@linux.intel.com> 8 * Lu Baolu <baolu.lu@linux.intel.com> 9 */ 10 11 #include <linux/debugfs.h> 12 #include <linux/dmar.h> 13 #include <linux/intel-iommu.h> 14 #include <linux/pci.h> 15 16 #include <asm/irq_remapping.h> 17 18 #include "pasid.h" 19 20 struct tbl_walk { 21 u16 bus; 22 u16 devfn; 23 u32 pasid; 24 struct root_entry *rt_entry; 25 struct context_entry *ctx_entry; 26 struct pasid_entry *pasid_tbl_entry; 27 }; 28 29 struct iommu_regset { 30 int offset; 31 const char *regs; 32 }; 33 34 #define IOMMU_REGSET_ENTRY(_reg_) \ 35 { DMAR_##_reg_##_REG, __stringify(_reg_) } 36 37 static const struct iommu_regset iommu_regs_32[] = { 38 IOMMU_REGSET_ENTRY(VER), 39 IOMMU_REGSET_ENTRY(GCMD), 40 IOMMU_REGSET_ENTRY(GSTS), 41 IOMMU_REGSET_ENTRY(FSTS), 42 IOMMU_REGSET_ENTRY(FECTL), 43 IOMMU_REGSET_ENTRY(FEDATA), 44 IOMMU_REGSET_ENTRY(FEADDR), 45 IOMMU_REGSET_ENTRY(FEUADDR), 46 IOMMU_REGSET_ENTRY(PMEN), 47 IOMMU_REGSET_ENTRY(PLMBASE), 48 IOMMU_REGSET_ENTRY(PLMLIMIT), 49 IOMMU_REGSET_ENTRY(ICS), 50 IOMMU_REGSET_ENTRY(PRS), 51 IOMMU_REGSET_ENTRY(PECTL), 52 IOMMU_REGSET_ENTRY(PEDATA), 53 IOMMU_REGSET_ENTRY(PEADDR), 54 IOMMU_REGSET_ENTRY(PEUADDR), 55 }; 56 57 static const struct iommu_regset iommu_regs_64[] = { 58 IOMMU_REGSET_ENTRY(CAP), 59 IOMMU_REGSET_ENTRY(ECAP), 60 IOMMU_REGSET_ENTRY(RTADDR), 61 IOMMU_REGSET_ENTRY(CCMD), 62 IOMMU_REGSET_ENTRY(AFLOG), 63 IOMMU_REGSET_ENTRY(PHMBASE), 64 IOMMU_REGSET_ENTRY(PHMLIMIT), 65 IOMMU_REGSET_ENTRY(IQH), 66 IOMMU_REGSET_ENTRY(IQT), 67 IOMMU_REGSET_ENTRY(IQA), 68 IOMMU_REGSET_ENTRY(IRTA), 69 IOMMU_REGSET_ENTRY(PQH), 70 IOMMU_REGSET_ENTRY(PQT), 71 IOMMU_REGSET_ENTRY(PQA), 72 IOMMU_REGSET_ENTRY(MTRRCAP), 73 IOMMU_REGSET_ENTRY(MTRRDEF), 74 IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000), 75 IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000), 76 IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000), 77 IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000), 78 IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000), 79 IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000), 80 IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000), 81 IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000), 82 IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000), 83 IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000), 84 IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000), 85 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0), 86 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0), 87 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1), 88 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1), 89 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2), 90 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2), 91 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3), 92 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3), 93 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4), 94 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4), 95 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5), 96 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5), 97 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6), 98 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6), 99 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7), 100 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7), 101 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8), 102 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8), 103 IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9), 104 IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9), 105 IOMMU_REGSET_ENTRY(VCCAP), 106 IOMMU_REGSET_ENTRY(VCMD), 107 IOMMU_REGSET_ENTRY(VCRSP), 108 }; 109 110 static int iommu_regset_show(struct seq_file *m, void *unused) 111 { 112 struct dmar_drhd_unit *drhd; 113 struct intel_iommu *iommu; 114 unsigned long flag; 115 int i, ret = 0; 116 u64 value; 117 118 rcu_read_lock(); 119 for_each_active_iommu(iommu, drhd) { 120 if (!drhd->reg_base_addr) { 121 seq_puts(m, "IOMMU: Invalid base address\n"); 122 ret = -EINVAL; 123 goto out; 124 } 125 126 seq_printf(m, "IOMMU: %s Register Base Address: %llx\n", 127 iommu->name, drhd->reg_base_addr); 128 seq_puts(m, "Name\t\t\tOffset\t\tContents\n"); 129 /* 130 * Publish the contents of the 64-bit hardware registers 131 * by adding the offset to the pointer (virtual address). 132 */ 133 raw_spin_lock_irqsave(&iommu->register_lock, flag); 134 for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) { 135 value = dmar_readl(iommu->reg + iommu_regs_32[i].offset); 136 seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n", 137 iommu_regs_32[i].regs, iommu_regs_32[i].offset, 138 value); 139 } 140 for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) { 141 value = dmar_readq(iommu->reg + iommu_regs_64[i].offset); 142 seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n", 143 iommu_regs_64[i].regs, iommu_regs_64[i].offset, 144 value); 145 } 146 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 147 seq_putc(m, '\n'); 148 } 149 out: 150 rcu_read_unlock(); 151 152 return ret; 153 } 154 DEFINE_SHOW_ATTRIBUTE(iommu_regset); 155 156 static inline void print_tbl_walk(struct seq_file *m) 157 { 158 struct tbl_walk *tbl_wlk = m->private; 159 160 seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t", 161 tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn), 162 PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi, 163 tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi, 164 tbl_wlk->ctx_entry->lo); 165 166 /* 167 * A legacy mode DMAR doesn't support PASID, hence default it to -1 168 * indicating that it's invalid. Also, default all PASID related fields 169 * to 0. 170 */ 171 if (!tbl_wlk->pasid_tbl_entry) 172 seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1, 173 (u64)0, (u64)0, (u64)0); 174 else 175 seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", 176 tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2], 177 tbl_wlk->pasid_tbl_entry->val[1], 178 tbl_wlk->pasid_tbl_entry->val[0]); 179 } 180 181 static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry, 182 u16 dir_idx) 183 { 184 struct tbl_walk *tbl_wlk = m->private; 185 u8 tbl_idx; 186 187 for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) { 188 if (pasid_pte_is_present(tbl_entry)) { 189 tbl_wlk->pasid_tbl_entry = tbl_entry; 190 tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx; 191 print_tbl_walk(m); 192 } 193 194 tbl_entry++; 195 } 196 } 197 198 static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr, 199 u16 pasid_dir_size) 200 { 201 struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr); 202 struct pasid_entry *pasid_tbl; 203 u16 dir_idx; 204 205 for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) { 206 pasid_tbl = get_pasid_table_from_pde(dir_entry); 207 if (pasid_tbl) 208 pasid_tbl_walk(m, pasid_tbl, dir_idx); 209 210 dir_entry++; 211 } 212 } 213 214 static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus) 215 { 216 struct context_entry *context; 217 u16 devfn, pasid_dir_size; 218 u64 pasid_dir_ptr; 219 220 for (devfn = 0; devfn < 256; devfn++) { 221 struct tbl_walk tbl_wlk = {0}; 222 223 /* 224 * Scalable mode root entry points to upper scalable mode 225 * context table and lower scalable mode context table. Each 226 * scalable mode context table has 128 context entries where as 227 * legacy mode context table has 256 context entries. So in 228 * scalable mode, the context entries for former 128 devices are 229 * in the lower scalable mode context table, while the latter 230 * 128 devices are in the upper scalable mode context table. 231 * In scalable mode, when devfn > 127, iommu_context_addr() 232 * automatically refers to upper scalable mode context table and 233 * hence the caller doesn't have to worry about differences 234 * between scalable mode and non scalable mode. 235 */ 236 context = iommu_context_addr(iommu, bus, devfn, 0); 237 if (!context) 238 return; 239 240 if (!context_present(context)) 241 continue; 242 243 tbl_wlk.bus = bus; 244 tbl_wlk.devfn = devfn; 245 tbl_wlk.rt_entry = &iommu->root_entry[bus]; 246 tbl_wlk.ctx_entry = context; 247 m->private = &tbl_wlk; 248 249 if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) { 250 pasid_dir_ptr = context->lo & VTD_PAGE_MASK; 251 pasid_dir_size = get_pasid_dir_size(context); 252 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size); 253 continue; 254 } 255 256 print_tbl_walk(m); 257 } 258 } 259 260 static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu) 261 { 262 unsigned long flags; 263 u16 bus; 264 265 spin_lock_irqsave(&iommu->lock, flags); 266 seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name, 267 (u64)virt_to_phys(iommu->root_entry)); 268 seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n"); 269 270 /* 271 * No need to check if the root entry is present or not because 272 * iommu_context_addr() performs the same check before returning 273 * context entry. 274 */ 275 for (bus = 0; bus < 256; bus++) 276 ctx_tbl_walk(m, iommu, bus); 277 278 spin_unlock_irqrestore(&iommu->lock, flags); 279 } 280 281 static int dmar_translation_struct_show(struct seq_file *m, void *unused) 282 { 283 struct dmar_drhd_unit *drhd; 284 struct intel_iommu *iommu; 285 u32 sts; 286 287 rcu_read_lock(); 288 for_each_active_iommu(iommu, drhd) { 289 sts = dmar_readl(iommu->reg + DMAR_GSTS_REG); 290 if (!(sts & DMA_GSTS_TES)) { 291 seq_printf(m, "DMA Remapping is not enabled on %s\n", 292 iommu->name); 293 continue; 294 } 295 root_tbl_walk(m, iommu); 296 seq_putc(m, '\n'); 297 } 298 rcu_read_unlock(); 299 300 return 0; 301 } 302 DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct); 303 304 static inline unsigned long level_to_directory_size(int level) 305 { 306 return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1)); 307 } 308 309 static inline void 310 dump_page_info(struct seq_file *m, unsigned long iova, u64 *path) 311 { 312 seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n", 313 iova >> VTD_PAGE_SHIFT, path[5], path[4], 314 path[3], path[2], path[1]); 315 } 316 317 static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde, 318 int level, unsigned long start, 319 u64 *path) 320 { 321 int i; 322 323 if (level > 5 || level < 1) 324 return; 325 326 for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT); 327 i++, pde++, start += level_to_directory_size(level)) { 328 if (!dma_pte_present(pde)) 329 continue; 330 331 path[level] = pde->val; 332 if (dma_pte_superpage(pde) || level == 1) 333 dump_page_info(m, start, path); 334 else 335 pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)), 336 level - 1, start, path); 337 path[level] = 0; 338 } 339 } 340 341 static int show_device_domain_translation(struct device *dev, void *data) 342 { 343 struct dmar_domain *domain = find_domain(dev); 344 struct seq_file *m = data; 345 u64 path[6] = { 0 }; 346 347 if (!domain) 348 return 0; 349 350 seq_printf(m, "Device %s with pasid %d @0x%llx\n", 351 dev_name(dev), domain->default_pasid, 352 (u64)virt_to_phys(domain->pgd)); 353 seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n"); 354 355 pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path); 356 seq_putc(m, '\n'); 357 358 return 0; 359 } 360 361 static int domain_translation_struct_show(struct seq_file *m, void *unused) 362 { 363 unsigned long flags; 364 int ret; 365 366 spin_lock_irqsave(&device_domain_lock, flags); 367 ret = bus_for_each_dev(&pci_bus_type, NULL, m, 368 show_device_domain_translation); 369 spin_unlock_irqrestore(&device_domain_lock, flags); 370 371 return ret; 372 } 373 DEFINE_SHOW_ATTRIBUTE(domain_translation_struct); 374 375 static void invalidation_queue_entry_show(struct seq_file *m, 376 struct intel_iommu *iommu) 377 { 378 int index, shift = qi_shift(iommu); 379 struct qi_desc *desc; 380 int offset; 381 382 if (ecap_smts(iommu->ecap)) 383 seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n"); 384 else 385 seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n"); 386 387 for (index = 0; index < QI_LENGTH; index++) { 388 offset = index << shift; 389 desc = iommu->qi->desc + offset; 390 if (ecap_smts(iommu->ecap)) 391 seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n", 392 index, desc->qw0, desc->qw1, 393 desc->qw2, desc->qw3, 394 iommu->qi->desc_status[index]); 395 else 396 seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n", 397 index, desc->qw0, desc->qw1, 398 iommu->qi->desc_status[index]); 399 } 400 } 401 402 static int invalidation_queue_show(struct seq_file *m, void *unused) 403 { 404 struct dmar_drhd_unit *drhd; 405 struct intel_iommu *iommu; 406 unsigned long flags; 407 struct q_inval *qi; 408 int shift; 409 410 rcu_read_lock(); 411 for_each_active_iommu(iommu, drhd) { 412 qi = iommu->qi; 413 shift = qi_shift(iommu); 414 415 if (!qi || !ecap_qis(iommu->ecap)) 416 continue; 417 418 seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name); 419 420 raw_spin_lock_irqsave(&qi->q_lock, flags); 421 seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n", 422 (u64)virt_to_phys(qi->desc), 423 dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift, 424 dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift); 425 invalidation_queue_entry_show(m, iommu); 426 raw_spin_unlock_irqrestore(&qi->q_lock, flags); 427 seq_putc(m, '\n'); 428 } 429 rcu_read_unlock(); 430 431 return 0; 432 } 433 DEFINE_SHOW_ATTRIBUTE(invalidation_queue); 434 435 #ifdef CONFIG_IRQ_REMAP 436 static void ir_tbl_remap_entry_show(struct seq_file *m, 437 struct intel_iommu *iommu) 438 { 439 struct irte *ri_entry; 440 unsigned long flags; 441 int idx; 442 443 seq_puts(m, " Entry SrcID DstID Vct IRTE_high\t\tIRTE_low\n"); 444 445 raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 446 for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) { 447 ri_entry = &iommu->ir_table->base[idx]; 448 if (!ri_entry->present || ri_entry->p_pst) 449 continue; 450 451 seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x %016llx\t%016llx\n", 452 idx, PCI_BUS_NUM(ri_entry->sid), 453 PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid), 454 ri_entry->dest_id, ri_entry->vector, 455 ri_entry->high, ri_entry->low); 456 } 457 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 458 } 459 460 static void ir_tbl_posted_entry_show(struct seq_file *m, 461 struct intel_iommu *iommu) 462 { 463 struct irte *pi_entry; 464 unsigned long flags; 465 int idx; 466 467 seq_puts(m, " Entry SrcID PDA_high PDA_low Vct IRTE_high\t\tIRTE_low\n"); 468 469 raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 470 for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) { 471 pi_entry = &iommu->ir_table->base[idx]; 472 if (!pi_entry->present || !pi_entry->p_pst) 473 continue; 474 475 seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x %016llx\t%016llx\n", 476 idx, PCI_BUS_NUM(pi_entry->sid), 477 PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid), 478 pi_entry->pda_h, pi_entry->pda_l << 6, 479 pi_entry->vector, pi_entry->high, 480 pi_entry->low); 481 } 482 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 483 } 484 485 /* 486 * For active IOMMUs go through the Interrupt remapping 487 * table and print valid entries in a table format for 488 * Remapped and Posted Interrupts. 489 */ 490 static int ir_translation_struct_show(struct seq_file *m, void *unused) 491 { 492 struct dmar_drhd_unit *drhd; 493 struct intel_iommu *iommu; 494 u64 irta; 495 u32 sts; 496 497 rcu_read_lock(); 498 for_each_active_iommu(iommu, drhd) { 499 if (!ecap_ir_support(iommu->ecap)) 500 continue; 501 502 seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n", 503 iommu->name); 504 505 sts = dmar_readl(iommu->reg + DMAR_GSTS_REG); 506 if (iommu->ir_table && (sts & DMA_GSTS_IRES)) { 507 irta = virt_to_phys(iommu->ir_table->base); 508 seq_printf(m, " IR table address:%llx\n", irta); 509 ir_tbl_remap_entry_show(m, iommu); 510 } else { 511 seq_puts(m, "Interrupt Remapping is not enabled\n"); 512 } 513 seq_putc(m, '\n'); 514 } 515 516 seq_puts(m, "****\n\n"); 517 518 for_each_active_iommu(iommu, drhd) { 519 if (!cap_pi_support(iommu->cap)) 520 continue; 521 522 seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n", 523 iommu->name); 524 525 if (iommu->ir_table) { 526 irta = virt_to_phys(iommu->ir_table->base); 527 seq_printf(m, " IR table address:%llx\n", irta); 528 ir_tbl_posted_entry_show(m, iommu); 529 } else { 530 seq_puts(m, "Interrupt Remapping is not enabled\n"); 531 } 532 seq_putc(m, '\n'); 533 } 534 rcu_read_unlock(); 535 536 return 0; 537 } 538 DEFINE_SHOW_ATTRIBUTE(ir_translation_struct); 539 #endif 540 541 void __init intel_iommu_debugfs_init(void) 542 { 543 struct dentry *intel_iommu_debug = debugfs_create_dir("intel", 544 iommu_debugfs_dir); 545 546 debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL, 547 &iommu_regset_fops); 548 debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug, 549 NULL, &dmar_translation_struct_fops); 550 debugfs_create_file("domain_translation_struct", 0444, 551 intel_iommu_debug, NULL, 552 &domain_translation_struct_fops); 553 debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug, 554 NULL, &invalidation_queue_fops); 555 #ifdef CONFIG_IRQ_REMAP 556 debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug, 557 NULL, &ir_translation_struct_fops); 558 #endif 559 } 560