// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel virtual memory and dumps the pages that are in
 * the hash pagetable, along with their flags to
 * /sys/kernel/debug/kernel_hash_pagetable.
 *
 * If radix is enabled then there is no hash page table and so no debugfs file
 * is generated.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/plpar_wrappers.h>
#include <linux/memblock.h>
#include <asm/firmware.h>

struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned int level;
	u64 current_flags;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
	{ 0, "Start of kernel VM" },
	{ 0, "vmalloc() Area" },
	{ 0, "vmalloc() End" },
	{ 0, "isa I/O start" },
	{ 0, "isa I/O end" },
	{ 0, "phb I/O start" },
	{ 0, "phb I/O end" },
	{ 0, "I/O remap start" },
	{ 0, "I/O remap end" },
	{ 0, "vmemmap start" },
	{ -1, NULL },
};

struct flag_info {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		is_val;
	int		shift;
};

static const struct flag_info v_flag_array[] = {
	{
		.mask	= SLB_VSID_B,
		.val	= SLB_VSID_B_256M,
		.set	= "ssize: 256M",
		.clear	= "ssize: 1T ",
	}, {
		.mask	= HPTE_V_SECONDARY,
		.val	= HPTE_V_SECONDARY,
		.set	= "secondary",
		.clear	= "primary ",
	}, {
		.mask	= HPTE_V_VALID,
		.val	= HPTE_V_VALID,
		.set	= "valid ",
		.clear	= "invalid",
	}, {
		.mask	= HPTE_V_BOLTED,
		.val	= HPTE_V_BOLTED,
		.set	= "bolted",
		.clear	= "",
	}
};

static const struct flag_info r_flag_array[] = {
	{
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RWXX,
		.set	= "prot:RW--",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RWRX,
		.set	= "prot:RWR-",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RWRW,
		.set	= "prot:RWRW",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RXRX,
		.set	= "prot:R-R-",
	}, {
		.mask	= HPTE_R_PP0 | HPTE_R_PP,
		.val	= PP_RXXX,
		.set	= "prot:R---",
	}, {
		.mask	= HPTE_R_KEY_HI | HPTE_R_KEY_LO,
		.val	= HPTE_R_KEY_HI | HPTE_R_KEY_LO,
		.set	= "key",
		.clear	= "",
		.is_val = true,
	}, {
		.mask	= HPTE_R_R,
		.val	= HPTE_R_R,
		.set	= "ref",
		.clear	= " ",
	}, {
		.mask	= HPTE_R_C,
		.val	= HPTE_R_C,
		.set	= "changed",
		.clear	= " ",
	}, {
		.mask	= HPTE_R_N,
		.val	= HPTE_R_N,
		.set	= "no execute",
	}, {
		.mask	= HPTE_R_WIMG,
		.val	= HPTE_R_W,
		.set	= "writethru",
	}, {
		.mask	= HPTE_R_WIMG,
		.val	= HPTE_R_I,
		.set	= "no cache",
	}, {
		.mask	= HPTE_R_WIMG,
		.val	= HPTE_R_G,
		.set	= "guarded",
	}
};

static int calculate_pagesize(struct pg_state *st, int ps, char s[])
{
	static const char units[] = "BKMGTPE";
	const char *unit = units;

	while (ps > 9 && unit[1]) {
		ps -= 10;
		unit++;
	}
	seq_printf(st->seq, " %s_ps: %i%c\t", s, 1<<ps, *unit);
	return ps;
}

static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
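		/*
		 * Each table entry either prints a named value (is_val set)
		 * or chooses between the "set" and "clear" strings depending
		 * on whether the masked bits of the PTE match val.
		 */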
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			seq_printf(st->seq, " %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				seq_printf(st->seq, " %s", s);
		}
	}
}

static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r,
		unsigned long rpn, int bps, int aps, unsigned long lp)
{
	int aps_index;

	while (ea >= st->marker[1].start_address) {
		st->marker++;
		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
	seq_printf(st->seq, "0x%lx:\t", ea);
	seq_printf(st->seq, "AVPN:%llx\t", HPTE_V_AVPN_VAL(v));
	dump_flag_info(st, v_flag_array, v, ARRAY_SIZE(v_flag_array));
	seq_printf(st->seq, " rpn: %lx\t", rpn);
	dump_flag_info(st, r_flag_array, r, ARRAY_SIZE(r_flag_array));

	calculate_pagesize(st, bps, "base");
	aps_index = calculate_pagesize(st, aps, "actual");
	if (aps_index != 2)
		seq_printf(st->seq, "LP enc: %lx", lp);
	seq_putc(st->seq, '\n');
}


static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
		*r)
{
	struct hash_pte *hptep;
	unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v;
	int i, ssize = mmu_kernel_ssize;
	unsigned long shift = mmu_psize_defs[psize].shift;

	/* calculate hash */
	vsid = get_kernel_vsid(ea, ssize);
	vpn  = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* to check in the secondary hash table, we invert the hash */
	if (!primary)
		hash = ~hash;
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* HPTE matches */
			*v = be64_to_cpu(hptep->v);
			*r = be64_to_cpu(hptep->r);
			return 0;
		}
		++hpte_group;
	}
	return -1;
}

static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r)
{
	struct hash_pte ptes[4];
	unsigned long vsid, vpn, hash, hpte_group, want_v;
	int i, j, ssize = mmu_kernel_ssize;
	long lpar_rc = 0;
	unsigned long shift = mmu_psize_defs[psize].shift;

	/* calculate hash */
	vsid = get_kernel_vsid(ea, ssize);
	vpn  = hpt_vpn(ea, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* to check in the secondary hash table, we invert the hash */
	if (!primary)
		hash = ~hash;
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	/* see if we can find an entry in the hpte with this hash */
	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);

		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
					(ptes[j].v & HPTE_V_VALID)) {
				/* HPTE matches */
				*v = ptes[j].v;
				*r = ptes[j].r;
				return 0;
			}
		}
	}
	return -1;
}

static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps,
		unsigned long *lp_bits)
{
	struct mmu_psize_def entry;
	unsigned long arpn, mask, lp;
	int penc = -2, idx = 0, shift;

	/*
	 * The LP field has 8 bits. Depending on the actual page size, some of
	 * these bits are concatenated with the ARPN to get the RPN. The rest
	 * of the bits in the LP field are the LP value and are an encoding for
	 * the base page size and the actual page size.
	 *
	 *  - find the mmu entry for our base page size
	 *  - go through all page encodings and use the associated mask to
	 *    find an encoding that matches our encoding in the LP field.
	 */
	arpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
	lp = arpn & 0xff;

	entry = mmu_psize_defs[bps];
	while (idx < MMU_PAGE_COUNT) {
		penc = entry.penc[idx];
		if ((penc != -1) && (mmu_psize_defs[idx].shift)) {
			shift = mmu_psize_defs[idx].shift - HPTE_R_RPN_SHIFT;
			mask = (0x1 << (shift)) - 1;
			if ((lp & mask) == penc) {
				*aps = mmu_psize_to_shift(idx);
				*lp_bits = lp & mask;
				*rpn = arpn >> shift;
				return;
			}
		}
		idx++;
	}
}

static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v,
			  u64 *r)
{
	if (IS_ENABLED(CONFIG_PPC_PSERIES) && firmware_has_feature(FW_FEATURE_LPAR))
		return pseries_find(ea, psize, primary, v, r);

	return native_find(ea, psize, primary, v, r);
}

static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
{
	unsigned long slot;
	u64 v = 0, r = 0;
	unsigned long rpn, lp_bits;
	/* actual_psize stays -1 if decode_r() finds no matching encoding */
	int base_psize = 0, actual_psize = -1;

	if (ea < PAGE_OFFSET)
		return -1;

	/* Look in primary table */
	slot = base_hpte_find(ea, psize, true, &v, &r);

	/* Look in secondary table */
	if (slot == -1)
		slot = base_hpte_find(ea, psize, false, &v, &r);

	/* No entry found */
	if (slot == -1)
		return -1;

	/*
	 * We found an entry in the hash page table:
	 *  - check that this has the same base page
	 *  - find the actual page size
	 *  - find the RPN
	 */
	base_psize = mmu_psize_to_shift(psize);

	if ((v & HPTE_V_LARGE) == HPTE_V_LARGE) {
		decode_r(psize, r, &rpn, &actual_psize, &lp_bits);
	} else {
		/* 4K actual page size */
		actual_psize = 12;
		rpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
		/* In this case there are no LP bits */
		lp_bits = -1;
	}
	/*
	 * We didn't find a matching encoding, so the PTE we found isn't for
	 * this address.
	 */
	if (actual_psize == -1)
		return -1;

	dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits);
	return 0;
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr, pteval, psize;
	int i, status;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		pteval = pte_val(*pte);

		if (addr < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;

		/* check for secret 4K mappings */
		if (IS_ENABLED(CONFIG_PPC_64K_PAGES) &&
		    ((pteval & H_PAGE_COMBO) == H_PAGE_COMBO ||
		     (pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN))
			psize = mmu_io_psize;

		/* check for hashpte */
		status = hpte_find(st, addr, psize);

		if (((pteval & H_PAGE_HASHPTE) != H_PAGE_HASHPTE)
				&& (status != -1)) {
			/* found a hpte that is not in the linux page tables */
			seq_printf(st->seq, "page probably bolted before linux"
				" pagetables were set: addr:%lx, pteval:%lx\n",
				addr, pteval);
		}
	}
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (!pmd_none(*pmd))
			/* pmd exists */
			walk_pte(st, pmd, addr);
	}
}

static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
	pud_t *pud = pud_offset(p4d, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud))
			/* pud exists */
			walk_pmd(st, pud, addr);
	}
}

static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		addr = start + i * P4D_SIZE;
		if (!p4d_none(*p4d))
			/* p4d exists */
			walk_pud(st, p4d, addr);
	}
}

static void walk_pagetables(struct pg_state *st)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	unsigned int i;
	unsigned long addr;

	/*
	 * Traverse the linux pagetable structure and dump pages that are in
	 * the hash pagetable.
	 */
	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = KERN_VIRT_START + i * PGDIR_SIZE;
		if (!pgd_none(*pgd))
			/* pgd exists */
			walk_p4d(st, pgd, addr);
	}
}


static void walk_linearmapping(struct pg_state *st)
{
	unsigned long addr;

	/*
	 * Traverse the linear mapping section of virtual memory and dump pages
	 * that are in the hash pagetable.
	 */
	unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	for (addr = PAGE_OFFSET; addr < PAGE_OFFSET +
			memblock_end_of_DRAM(); addr += psize)
		hpte_find(st, addr, mmu_linear_psize);
}

static void walk_vmemmap(struct pg_state *st)
{
	struct vmemmap_backing *ptr = vmemmap_list;

	if (!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;
	/*
	 * Traverse the vmemmaped memory and dump pages that are in the hash
	 * pagetable.
	 */
	while (ptr->list) {
		hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize);
		ptr = ptr->list;
	}
	seq_puts(st->seq, "---[ vmemmap end ]---\n");
}

static void populate_markers(void)
{
	address_markers[0].start_address = PAGE_OFFSET;
	address_markers[1].start_address = VMALLOC_START;
	address_markers[2].start_address = VMALLOC_END;
	address_markers[3].start_address = ISA_IO_BASE;
	address_markers[4].start_address = ISA_IO_END;
	address_markers[5].start_address = PHB_IO_BASE;
	address_markers[6].start_address = PHB_IO_END;
	address_markers[7].start_address = IOREMAP_BASE;
	address_markers[8].start_address = IOREMAP_END;
	address_markers[9].start_address = H_VMEMMAP_START;
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.start_address = PAGE_OFFSET,
		.marker = address_markers,
	};
	/*
	 * Traverse the 0xc, 0xd and 0xf areas of the kernel virtual memory and
	 * dump pages that are in the hash pagetable.
	 */
	walk_linearmapping(&st);
	walk_pagetables(&st);
	walk_vmemmap(&st);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ptdump_init(void)
{
	if (!radix_enabled()) {
		populate_markers();
		debugfs_create_file("kernel_hash_pagetable", 0400, NULL, NULL,
				    &ptdump_fops);
	}
	return 0;
}
device_initcall(ptdump_init);
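
/*
 * Usage sketch, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug and the kernel is running with a hash MMU:
 *
 *   cat /sys/kernel/debug/kernel_hash_pagetable
 *
 * Each line printed by dump_hpte_info() gives the effective address, the
 * AVPN, the decoded V and R flags, the RPN, and the base and actual page
 * sizes of one hash PTE; markers such as "---[ vmalloc() Area ]---"
 * separate the regions listed in address_markers[].
 */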