// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

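/*
 * Per-page counters, kept in each page's page_ext extension area. A page
 * may be mapped as anonymous memory or as file-backed memory, but never
 * both at once, so at most one of these counters should be non-zero at
 * any time.
 */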
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

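/*
 * Checking is enabled either statically, via CONFIG_PAGE_TABLE_CHECK_ENFORCED,
 * or at boot time with the "page_table_check=on" kernel parameter; see
 * Documentation/mm/page_table_check.rst.
 */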
static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

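/*
 * The key defaults to true ("checks disabled"), so the inline wrappers in
 * <linux/page_table_check.h> cost only a patched-out branch when the
 * checker is off, e.g.:
 *
 *	if (static_branch_likely(&page_table_check_disabled))
 *		return;
 *	__page_table_check_pte_set(mm, addr, ptep, pte);
 */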
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

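/* Parse the "page_table_check=" boot parameter (accepts on/off, 1/0, y/n). */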
static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

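/* Note the inversion: enabling the checker means disabling the static key. */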
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

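/*
 * Registered with the page_ext framework: ->size reserves room for one
 * struct page_table_check per page, ->need decides at boot whether that
 * space is allocated at all, and ->init flips the static key.
 */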
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

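/*
 * Return this file's slice of a page's page_ext area; ->offset is filled
 * in by the page_ext core when the ops above are registered.
 */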
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table; decrement the counters for the
 * affected pages, verify that each page is of the correct type, and check
 * that no counter goes negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table; increment the counters for the
 * affected pages and verify that each page is of the correct type. In
 * particular, an anonymous page must never be mapped writable more than
 * once.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
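
/*
 * Example of the class of bug the rw check above catches (a sketch, not a
 * path in this file): if some kernel path wrongly maps the same anonymous
 * page writable into two address spaces, the second call sees
 * anon_map_count climb above one while rw is true and hits the BUG_ON(),
 * rather than letting the two processes silently corrupt each other's
 * memory.
 */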

/*
 * The page is on the free list or is being allocated; verify that all
 * counters are zero and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

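/*
 * Public hooks. Kernel page tables (init_mm) are exempt from checking.
 * Architectures that select CONFIG_PAGE_TABLE_CHECK call these, through
 * the static-key wrappers in <linux/page_table_check.h>, from their page
 * table helpers; roughly (a sketch of the arch glue, not code from this
 * file):
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		page_table_check_pte_set(mm, addr, ptep, pte);
 *		set_pte(ptep, pte);
 *	}
 */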
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);
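
/*
 * The PMD/PUD variants below apply the same accounting to every base page
 * covered by the huge entry; PAGE_SIZE >> PAGE_SHIFT above is simply 1.
 */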

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

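/*
 * The "set" hooks first account for any entry that is being overwritten,
 * via the corresponding "clear" hook, and only then validate and count
 * the new entry.
 */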
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

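/*
 * A non-leaf PMD is about to be cleared, for example when a PTE table is
 * detached during THP collapse: walk all of its PTEs and account for each
 * entry as if it were cleared individually.
 */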
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}