// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

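/*
 * One of these structures lives in the page_ext area of every base page.
 * The counters record how many times the page is currently mapped into
 * userspace as anonymous memory and as file-backed memory; a page must
 * never be both at once.
 */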
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

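/*
 * Checking is off by default; it is switched on by booting with
 * "page_table_check=on" or by building with CONFIG_PAGE_TABLE_CHECK_ENFORCED.
 * The wrappers in <linux/page_table_check.h> test the static key before
 * calling into this file, roughly like this (a sketch, not the verbatim
 * header):
 *
 *	static inline void page_table_check_pte_clear(struct mm_struct *mm,
 *						      unsigned long addr,
 *						      pte_t pte)
 *	{
 *		if (static_branch_likely(&page_table_check_disabled))
 *			return;
 *		__page_table_check_pte_clear(mm, addr, pte);
 *	}
 *
 * so a disabled check costs only a patched-out branch on the hot paths.
 */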
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

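/*
 * Usage: boot with "page_table_check=on" (or "=off" to override
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED); kstrtobool() also accepts 1/0 and y/n.
 */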
early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	/* Checking was requested: turn the "disabled" key off to enable it. */
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

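/*
 * The page_ext core reserves .size bytes per page for us and records where
 * they start in page_table_check_ops.offset; recover our per-page struct
 * from a page's page_ext here.
 */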
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for each
 * page it mapped, and verify that every page is of the expected type and
 * that no counter goes negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for each
 * page it maps, and verify that the page is of the expected type and is not
 * already mapped in a conflicting way. An anonymous page may be mapped
 * read-only any number of times (e.g. after fork()), but a writable mapping
 * of an anonymous page must be its only mapping, hence the "> 1 && rw"
 * check below.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

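/*
 * The function above is the core double-map detector: if corrupted or stale
 * page-table state ever makes the same anonymous page writable in two places
 * at once, the BUG_ON fires at the moment the second entry is installed,
 * rather than surfacing much later as silent memory corruption.
 */
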
/*
 * The page is on the free list or is being allocated: verify that both
 * counters are zero, and crash if they are not, since a page leaving or
 * entering the allocator must not still be mapped into userspace.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

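/*
 * The entry points below are reached, via the static-key wrappers in
 * <linux/page_table_check.h>, from the architecture's PTE/PMD/PUD update
 * helpers. Kernel mappings (init_mm) are not tracked; only entries that map
 * user-accessible pages are counted.
 */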
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	/* Account for any existing entry this store overwrites. */
	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

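/*
 * For these hooks to fire, an architecture that selects
 * ARCH_SUPPORTS_PAGE_TABLE_CHECK calls the wrappers from its page-table
 * update helpers; a rough sketch of the pattern, not any one architecture's
 * exact code:
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		page_table_check_pte_set(mm, addr, ptep, pte);
 *		set_pte(ptep, pte);
 *	}
 */
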
/*
 * An entire page-table page is about to go away (for example when THP
 * collapse tears down a PTE table): walk every slot and account each live
 * entry as cleared before the table is disconnected.
 */
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}