/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/sections.h>

enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}

static void report_usercopy(const void *ptr, unsigned long len,
			    bool to_user, const char *type)
{
	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to", ptr, type ? : "unknown", len);
	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
		     unsigned long high)
{
	unsigned long check_low = (unsigned long)ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
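/*
 * Worked example (a sketch, #if 0'd out of the build): both ranges
 * given to overlaps() are half-open, so intervals that merely touch
 * do not count as overlapping. The helper name below is hypothetical
 * and exists only to illustrate the call; the addresses in the
 * comment are made up.
 */
#if 0
static bool example_overlaps_text(const void *ptr, unsigned long n)
{
	/*
	 * Suppose _stext == 0x1000 and _etext == 0x2000. An 8-byte
	 * object at 0xff8 ends exactly at 0x1000 (check_high <= low)
	 * and does NOT overlap, while the same object at 0xffc does.
	 */
	return overlaps(ptr, n, (unsigned long)_stext,
			(unsigned long)_etext);
}
#endif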
/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
						   unsigned long n)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		return "<kernel text>";

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This is usually
	 * the case when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the inverse of __va(). This
	 * can be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";

	return NULL;
}

static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
	/* Reject if object wraps past end of memory. */
	if ((unsigned long)ptr + n < (unsigned long)ptr)
		return "<wrapped address>";

	/* Reject if NULL or zero-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		return "<null>";

	return NULL;
}

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline const char *check_page_span(const void *ptr, unsigned long n,
					  struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			return "<rodata>";
		return NULL;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return NULL;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return NULL;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return NULL;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return NULL;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		return "<spans multiple pages>";

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			return "<spans Reserved and non-Reserved pages>";
		if (is_cma && !is_migrate_cma_page(page))
			return "<spans CMA and non-CMA pages>";
	}
#endif

	return NULL;
}
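/*
 * Worked example (a sketch, #if 0'd out of the build): how the
 * same-base-page fast path in check_page_span() behaves. The helper
 * name is hypothetical and the page size in the comment is only
 * illustrative.
 */
#if 0
static bool example_crosses_base_page(const void *ptr, unsigned long n)
{
	const void *end = ptr + n - 1;

	/*
	 * With a 4096-byte page, an 8-byte object starting at page
	 * offset 4092 has its first and last bytes in different base
	 * pages, so this returns true and check_page_span() must fall
	 * through to the compound/Reserved/CMA checks above.
	 */
	return ((unsigned long)ptr & PAGE_MASK) !=
	       ((unsigned long)end & PAGE_MASK);
}
#endif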
static inline const char *check_heap_object(const void *ptr, unsigned long n,
					    bool to_user)
{
	struct page *page;

	/*
	 * Some architectures (arm64) return true for virt_addr_valid() on
	 * vmalloced addresses. Work around this by checking for vmalloc
	 * first.
	 *
	 * We also need to check for module addresses explicitly since we
	 * may copy static data from modules to userspace.
	 */
	if (is_vmalloc_or_module_addr(ptr))
		return NULL;

	if (!virt_addr_valid(ptr))
		return NULL;

	page = virt_to_head_page(ptr);

	/* Check slab allocator for flags and size. */
	if (PageSlab(page))
		return __check_heap_object(ptr, n, page);

	/* Verify object does not incorrectly span multiple pages. */
	return check_page_span(ptr, n, page, to_user);
}

/*
 * Validates that the given object is:
 * - not bogus address
 * - known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *err;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	err = check_bogus_address(ptr, n);
	if (err)
		goto report;

	/* Check for bad heap object. */
	err = check_heap_object(ptr, n, to_user);
	if (err)
		goto report;

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		err = "<process stack>";
		goto report;
	}

	/* Check for object in kernel to avoid text exposure. */
	err = check_kernel_text_object(ptr, n);
	if (!err)
		return;

report:
	report_usercopy(ptr, n, to_user, err);
}
EXPORT_SYMBOL(__check_object_size);
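/*
 * Usage sketch (a hypothetical caller, #if 0'd out of the build):
 * __check_object_size() is normally reached indirectly, through the
 * hardened copy_{to,from}_user() paths, rather than called directly.
 * The function name and buffer below are made up for illustration;
 * a real caller would also need <linux/uaccess.h>.
 */
#if 0
static long example_read(void __user *buf, unsigned long len)
{
	static const char msg[] = "hello";

	if (len > sizeof(msg))
		len = sizeof(msg);

	/*
	 * On a CONFIG_HARDENED_USERCOPY kernel, copy_to_user() ends up
	 * invoking __check_object_size(msg, len, true) before copying.
	 */
	return copy_to_user(buf, msg, len) ? -EFAULT : 0;
}
#endif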