// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

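/*
 * Per-page accounting state, stored in each base page's page_ext. The
 * counters record how many user page-table entries currently map the page
 * as anonymous and as file-backed, respectively; the checks below enforce
 * that the two kinds never coexist on one page.
 */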
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

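/*
 * "page_table_check=on" (or "off") on the kernel command line overrides
 * the CONFIG_PAGE_TABLE_CHECK_ENFORCED default; kstrtobool() also accepts
 * the usual 1/0 and y/n spellings.
 */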
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

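/*
 * Registered with the page_ext subsystem: .size reserves room for one
 * struct page_table_check per base page, .need decides at boot whether
 * that space is allocated at all, and .init flips the static key so the
 * check hooks become active.
 */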
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

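/*
 * page_ext fills in page_table_check_ops.offset at boot; our per-page
 * state lives at that offset inside the page's page_ext.
 */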
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, and verify that the page is of the correct type and that no counter
 * drops below zero.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			/* An anonymous page must not also be file-mapped. */
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			/* A file page must not also be anon-mapped. */
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, and verify that the page is of the correct type and is not already
 * mapped with a conflicting type by another process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			/*
			 * A writable mapping is allowed only while this is
			 * the sole mapping of the anonymous page.
			 */
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			/* File pages may be mapped any number of times. */
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * The page is on the free list or is being allocated: verify that both
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

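/*
 * Entry points called (via the static-key wrappers in
 * <linux/page_table_check.h>) from the architecture's page-table helpers.
 * Kernel mappings (init_mm) are not tracked. The page count passed down is
 * the number of base pages the entry maps: PAGE_SIZE >> PAGE_SHIFT == 1
 * for a PTE, PMD_SIZE >> PAGE_SHIFT for a huge PMD, and so on.
 */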
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

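/*
 * The set hooks below first account the removal of any old entry at the
 * given slot, then account the new entry, passing its writability so that
 * a second writable mapping of an anonymous page is caught.
 */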
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

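/*
 * Illustrative sketch (not part of this file) of how an architecture wires
 * the hook into its PTE setter when CONFIG_PAGE_TABLE_CHECK is enabled,
 * e.g. in arch/x86/include/asm/pgtable.h:
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		page_table_check_pte_set(mm, addr, ptep, pte);
 *		set_pte(ptep, pte);
 *	}
 */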
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

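/*
 * Called when a non-leaf PMD (i.e. a page of PTEs) is about to be torn
 * down: account every one of its PTRS_PER_PTE entries as cleared.
 */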
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		/* Rewind to the first entry before unmapping the PTE page. */
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
250