/* xref: /openbmc/linux/mm/ptdump.c (revision 002dff36) */
// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#ifdef CONFIG_KASAN
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_early_shadow_page, we can call
 * note_page() right away without walking through the lower-level page
 * tables. This saves us dozens of seconds (minutes for a 5-level config)
 * when checking for W+X mappings or reading the kernel_page_tables
 * debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	/* Report a single PTE-level entry for the whole shadow region. */
	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	/* Tell the walker not to descend into the lower levels. */
	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif

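/*
 * The level argument passed to note_page() and effective_prot() encodes
 * the page-table level: 0 for PGD, 1 for P4D, 2 for PUD, 3 for PMD and
 * 4 for PTE; -1 means the level is unknown (ptdump_walk_pgd() also uses
 * it for the final flush).
 */
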
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && defined(CONFIG_KASAN)
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val))
		st->note_page(st, addr, 0, pgd_val(val));

	return 0;
}

static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && defined(CONFIG_KASAN)
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val))
		st->note_page(st, addr, 1, p4d_val(val));

	return 0;
}

static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_KASAN)
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val))
		st->note_page(st, addr, 2, pud_val(val));

	return 0;
}

static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));

	if (pmd_leaf(val))
		st->note_page(st, addr, 3, pmd_val(val));

	return 0;
}

static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = READ_ONCE(*pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	/* PTEs are always leaf entries, so report them unconditionally. */
	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}

static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;

	/* An unpopulated entry: report it with a zero value. */
	st->note_page(st, addr, depth, 0);

	return 0;
}

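/*
 * Hook every level of the generic page-table walker in mm/pagewalk.c;
 * .pte_hole reports the unpopulated gaps between entries.
 */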
static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};

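/*
 * Walk the page tables in @pgd (the pagewalk core falls back to @mm->pgd
 * when @pgd is NULL) and report every populated entry and every hole to
 * st->note_page(). st->range must point to a list of ranges terminated
 * by a {0, 0} sentinel. The mmap lock is held so the tables stay stable
 * during the walk.
 */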
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	mmap_read_lock(mm);
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_read_unlock(mm);

	/* Flush out the last page */
	st->note_page(st, 0, -1, 0);
}
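
/*
 * A minimal sketch of a caller, for illustration only: the example_*
 * names below are hypothetical and not part of the kernel; real users
 * live under arch/ (e.g. arch/arm64/mm/ptdump.c). The callback signature
 * mirrors the note_page() calls above; a real implementation would
 * coalesce runs of identical entries instead of printing each one.
 */
static void example_note_page(struct ptdump_state *st, unsigned long addr,
			      int level, unsigned long val)
{
	/* Skip the final flush call, which passes level -1. */
	if (level >= 0)
		pr_info("0x%016lx: level %d val 0x%016lx\n", addr, level, val);
}

static void example_dump_kernel_tables(void)
{
	/* One range covering the kernel half, plus the {0, 0} sentinel. */
	const struct ptdump_range ranges[] = {
		{PAGE_OFFSET, ~0UL},
		{0, 0}
	};
	struct ptdump_state st = {
		.note_page	= example_note_page,
		.range		= ranges,
	};

	/* NULL pgd: the walk uses init_mm's own page-table root. */
	ptdump_walk_pgd(&st, &init_mm, NULL);
}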