// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		__page_table_check_enabled = true;
	else if (strcmp(buf, "off") == 0)
		__page_table_check_enabled = false;

	return 0;
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
		(pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
		(pud_val(pud) & _PAGE_USER);
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, and verify that it is of the correct type and that the counters do
 * not become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}
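
/*
 * Illustrative note (hypothetical call sequence; mm_a/mm_b, addr_a/addr_b and
 * pfn are made-up values, and these static helpers are only reached via the
 * __page_table_check_*_set/clear entry points below): page_table_check_clear()
 * above and page_table_check_set() below together enforce that a pfn is never
 * mapped as both anonymous and file-backed, and that a writable anonymous
 * page is mapped by at most one address space. For a PageAnon() page:
 *
 *	page_table_check_set(mm_a, addr_a, pfn, 1, true);  // anon_map_count 0 -> 1, OK
 *	page_table_check_clear(mm_a, addr_a, pfn, 1);      // anon_map_count 1 -> 0, OK
 *	page_table_check_set(mm_a, addr_a, pfn, 1, true);  // anon_map_count 0 -> 1, OK
 *	page_table_check_set(mm_b, addr_b, pfn, 1, true);  // 1 -> 2 while writable: BUG()
 */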

/*
 * A new entry is added to the page table: increment the counters for that
 * page, and verify that it is of the correct type and is not already mapped
 * with a different type by a different process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}

void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_PAGE_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_PAGE_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);
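
/*
 * Sketch of how an architecture consumes the hooks above (an assumption
 * modeled on the x86 wiring; the real call sites live in the arch pgtable
 * headers and go through the static-key wrappers in
 * include/linux/page_table_check.h, which return early while
 * page_table_check_disabled is set):
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		page_table_check_pte_set(mm, addr, ptep, pte);
 *		set_pte(ptep, pte);
 *	}
 */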

void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		/* Unmap only after the PTEs have been read. */
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
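
/*
 * Usage note: building with CONFIG_PAGE_TABLE_CHECK=y only compiles the
 * checks in; they stay behind the page_table_check_disabled static key until
 * the kernel is booted with "page_table_check=on" (parsed by
 * early_page_table_check_param() above) or CONFIG_PAGE_TABLE_CHECK_ENFORCED
 * is selected to flip the default. Once enabled, any violation of the
 * per-page counter invariants crashes the kernel via BUG_ON().
 */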