// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

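/*
 * Per-page counters, stored in the page's page_ext, tracking how many
 * times the page is currently mapped into user page tables as anonymous
 * and as file-backed memory. A page must never be mapped as both at once.
 */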
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

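/*
 * Checking defaults to on only when CONFIG_PAGE_TABLE_CHECK_ENFORCED is
 * set; otherwise it must be requested with the "page_table_check=on"
 * early boot parameter, parsed below.
 */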
static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

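/*
 * The static key defaults to true ("checking disabled"), which keeps the
 * inline wrappers in include/linux/page_table_check.h as near no-ops; it
 * is flipped in init_page_table_check() once the feature is enabled.
 */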
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

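/*
 * page_ext client callbacks: ->need() tells the page_ext subsystem
 * whether to reserve space for our counters at all, and ->init() runs
 * once that space exists, which is when checking is actually switched on.
 */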
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

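/*
 * Return the page_table_check slot that the page_ext subsystem reserved
 * for this page, found at our registered offset within its page_ext.
 */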
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

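/*
 * An entry is "user accessible" if it is present and has the user bit
 * set. These helpers test _PAGE_PRESENT/_PAGE_USER directly, so they rely
 * on the architecture defining those bits (x86 at this revision).
 */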
static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
		(pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
		(pud_val(pud) & _PAGE_USER);
}

/*
 * An entry is removed from the page table: decrement the counters for the
 * mapped page, verify that the page is of the expected type, and verify
 * that the counters do not become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

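	/*
	 * A huge entry (pgcnt > 1) spans multiple base pages; visit the
	 * page_ext of each one and drop its counter individually.
	 */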
	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * A new entry is added to the page table: increment the counters for the
 * mapped page, verify that the page is of the expected type, and verify
 * that it is not already mapped with a different type by another process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

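		/*
		 * The core invariant: an anonymous page must not gain a
		 * writable mapping while it is mapped anywhere else, and no
		 * page may be mapped as both anonymous and file-backed.
		 */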
		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * The page is on a free list or is being allocated: verify that all
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}

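/*
 * The hooks below are invoked (through the wrappers declared in
 * include/linux/page_table_check.h) from the architecture's page table
 * update paths such as set_pte_at() and ptep_get_and_clear(). Entries in
 * the kernel's own init_mm are never user mappings, so they are skipped.
 */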
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

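/*
 * Setting an entry is handled as clear-then-set: counters for whatever
 * entry is being overwritten are dropped first, so that replacing a
 * valid entry in place does not leak a map count.
 */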
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_PAGE_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_PAGE_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

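/*
 * An entire page table page behind a non-leaf PMD is about to go away:
 * walk all PTRS_PER_PTE entries it maps and drop the counters for each
 * one that still maps a user-accessible page.
 */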
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		/* Unmap only after the walk, using the original pointer. */
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
262