// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

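/*
 * Per-page counters, stored in page_ext, tracking how many times a page is
 * mapped into user page tables as anonymous vs. file-backed memory. The
 * checks below enforce that a page is never both at once.
 */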
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

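/*
 * Example: with CONFIG_PAGE_TABLE_CHECK built in but
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED not set, the checks can be turned on at
 * boot by adding "page_table_check=on" to the kernel command line
 * (kstrtobool() also accepts "1", "y", etc.).
 */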
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

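/*
 * The static key starts out "true" (checks disabled). When the feature is
 * enabled, flip it off so the inline page_table_check_*() wrappers stop
 * bailing out early and start calling into the checking code below.
 */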
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};

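/*
 * page_ext reserves .size bytes of per-page data for this feature;
 * page_ext_data() returns the slice belonging to page_table_check_ops.
 */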
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and that it is not being
 * mapped with a different type into a different process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

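		/*
		 * Anonymous pages must not be shared writably: crash if this
		 * page is already mapped elsewhere and the new mapping is
		 * writable. File pages may be mapped many times; their
		 * counter only needs to stay non-negative.
		 */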
		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

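/*
 * The __page_table_check_*() entry points below are reached via the static
 * inline page_table_check_*() wrappers in <linux/page_table_check.h>, which
 * return early while page_table_check_disabled is set. Kernel-only (init_mm)
 * mappings are always ignored. A single PTE covers one page
 * (PAGE_SIZE >> PAGE_SHIFT == 1); PMD and PUD leaves cover
 * PMD_SIZE >> PAGE_SHIFT and PUD_SIZE >> PAGE_SHIFT pages respectively.
 */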
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

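/*
 * The set hooks first account for whatever entries they overwrite, then
 * validate the new mapping: the bookkeeping must balance, so every set of a
 * user-accessible entry is paired with a clear of the old value.
 */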
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
				 unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);

void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

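/*
 * Called when an entire PTE page is about to be unmapped (for example when a
 * THP collapse replaces it with a huge mapping): walk all PTRS_PER_PTE
 * entries and account for each one as cleared.
 */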
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}