xref: /openbmc/linux/mm/page_table_check.c (revision b755c25f)
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

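/*
 * One instance lives in the page_ext of every physical page.  The two
 * counters track how many user page-table entries currently map the page as
 * anonymous vs. file-backed memory; mixing the two, or letting a counter go
 * negative, indicates page-table corruption.
 */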
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);
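
/*
 * With CONFIG_PAGE_TABLE_CHECK built in, checking still stays off unless
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED is set or the kernel is booted with:
 *
 *	page_table_check=on
 */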

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

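/*
 * Note the double negative: the feature is gated on a "disabled" static key
 * that defaults to true, so turning page_table_check on means disabling that
 * static branch here.
 */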
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

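/*
 * The page_ext machinery reserves .size extra bytes per page when .need
 * returns true and records where they live in .offset, which is how
 * get_page_table_check() below locates this page's counters.
 */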
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that the page is of the correct type, and make sure the
 * counters do not become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

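/*
 * page_table_check_clear() is the common helper behind the pte/pmd/pud clear
 * hooks below; pgcnt is the number of base pages covered by the entry (1 for
 * a PTE, PMD_SIZE/PAGE_SIZE or PUD_SIZE/PAGE_SIZE for huge entries).  The
 * page_ext_get()/page_ext_put() pair holds an RCU read lock so the page_ext
 * lookup stays stable while the per-page counters are walked.
 */
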
/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that the page is of the correct type, and that it is not
 * already mapped with a different type into another process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
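
/*
 * The invariants enforced above: a page is never mapped as anonymous and
 * file-backed at the same time, and a writable (rw) mapping of an anonymous
 * page must be its only mapping.  Violations indicate the kind of page-table
 * corruption this feature is designed to catch and trigger a BUG().
 */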

/*
 * The page is on the free list or is being allocated: verify that the
 * counters are zero and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
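
/*
 * __page_table_check_zero() is reached from the page allocator's free and
 * allocation paths (through the static-key wrappers in
 * <linux/page_table_check.h>), so a page that is freed while it still has
 * live user mappings, or handed out with stale counters, is caught
 * immediately.
 */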

void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);
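
/*
 * Architectures that select ARCH_SUPPORTS_PAGE_TABLE_CHECK call the
 * static-key wrappers from <linux/page_table_check.h> in their pte/pmd/pud
 * helpers.  A rough sketch (not verbatim arch code) of the pattern used for
 * clearing a PTE, modelled on x86:
 *
 *	static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 *					       unsigned long addr, pte_t *ptep)
 *	{
 *		pte_t pte = native_ptep_get_and_clear(ptep);
 *
 *		page_table_check_pte_clear(mm, addr, pte);
 *		return pte;
 *	}
 */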

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);
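
/*
 * All of the clear/set hooks share two filters: kernel page tables (init_mm)
 * are ignored, and only entries for which the architecture's
 * p{te,md,ud}_user_accessible_page() helper returns true (present entries
 * mapping memory accessible to userspace) are accounted.
 */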

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, ptep_get(ptep));
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);
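
/*
 * The set hooks read the entry being overwritten (ptep_get(ptep), *pmdp,
 * *pudp) and un-account it first, so they must run before the new value is
 * written to the page table; the new entry is then counted, with its
 * writability (p*_write()) deciding the rw argument.
 */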

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

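/*
 * Called when a whole PTE table is about to be detached from a non-leaf PMD
 * (for example by khugepaged when collapsing a range into a huge page), so
 * that every entry in the table is un-accounted before the page table goes
 * away.
 */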
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
259