xref: /openbmc/linux/mm/usercopy.c (revision efb339a83368ab25de1a18c0fdff85e01c13a1ea)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks whether a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when exact frame checking is
 *		    not possible)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
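
/*
 * Worked example of the classification above (hypothetical offsets,
 * assuming a downward-growing stack and arch_within_stack_frames()
 * returning 0): with stack = S and stackend = S + THREAD_SIZE,
 *
 *	obj = S + 0x100, len = 0x40	-> GOOD_STACK (fully inside)
 *	obj = S - 0x20,  len = 0x40	-> BAD_STACK  (straddles the base)
 *	obj = S - 0x100, len = 0x40	-> NOT_STACK  (entirely outside)
 *
 * When CONFIG_ARCH_HAS_CURRENT_STACK_POINTER is set, an object starting
 * below the current stack pointer (i.e. in the unused stack area) is
 * also rejected as BAD_STACK.
 */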

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, while kernel text addressing will always trip them.
 * For cache objects, the check ensures that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
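
/*
 * Example of the resulting report (hypothetical cache name and sizes),
 * as emitted by the pr_emerg() above with pr_fmt() applied:
 *
 *	usercopy: Kernel memory exposure attempt detected from SLUB object
 *	'kmalloc-64' (offset 16, size 128)!
 *
 * The BUG() that follows kills the offending process (or panics, if
 * panic_on_oops is set) rather than letting the bad copy proceed.
 */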

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}
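
/*
 * Worked example with hypothetical values, low = 0x1000 and high = 0x2000
 * (both intervals are half-open):
 *
 *	overlaps(0x0f00, 0x100, 0x1000, 0x2000) -> false (ends exactly at low)
 *	overlaps(0x0f00, 0x101, 0x1000, 0x2000) -> true  (crosses into range)
 *	overlaps(0x2000, 0x100, 0x1000, 0x2000) -> false (starts at high)
 */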

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). This can
	 * be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}
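
/*
 * For reference: lm_alias() (from <linux/mm.h>) is essentially
 * __va(__pa_symbol(x)), i.e. it translates a kernel-image symbol address
 * into its alias in the linear (direct) mapping. On arm64, for instance,
 * the kernel image and the linear map live in different virtual ranges,
 * so both aliases of [_stext, _etext) have to be rejected here.
 */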

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO_SIZE_PTR (i.e. a zero-size allocation). */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
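
/*
 * Worked example of the wrap check (hypothetical 64-bit values): for
 * ptr = 0xfffffffffffffff0 and n = 0x20, ptr + (n - 1) wraps around to
 * 0xf, which is less than ptr, so the copy is rejected. Note that n is
 * known to be non-zero here because __check_object_size() returns early
 * for n == 0, so (n - 1) cannot underflow.
 */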

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long offset;
	struct folio *folio;

	if (is_kmap_addr(ptr)) {
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	if (is_vmalloc_addr(ptr)) {
		struct vmap_area *area = find_vmap_area(addr);

		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);

		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

	if (!virt_addr_valid(ptr))
		return;

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}
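
/*
 * Illustrative sketch (hypothetical types and names, not part of this
 * file): creating a cache whose whitelist allows user copies only for the
 * 'args' field. Copying any other range of a struct example object would
 * land in usercopy_abort() via __check_heap_object():
 *
 *	struct example {
 *		u32 flags;
 *		char args[64];		// only this field is whitelisted
 *		void *private;
 *	};
 *
 *	cache = kmem_cache_create_usercopy("example_cache",
 *			sizeof(struct example), 0, SLAB_PANIC,
 *			offsetof(struct example, args),
 *			sizeof_field(struct example, args), NULL);
 */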

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
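
/*
 * For context: callers normally reach this through check_object_size()
 * rather than calling __check_object_size() directly. Paraphrased from
 * <linux/thread_info.h> (details vary by kernel version):
 *
 *	static __always_inline void check_object_size(const void *ptr,
 *						      unsigned long n,
 *						      bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 *
 * Compile-time-constant sizes are left to static checking, so only
 * runtime-sized copies pay for these tests.
 */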

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (kstrtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);
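
/*
 * Usage note: with CONFIG_HARDENED_USERCOPY=y the checks default to on
 * and can be disabled at boot with "hardened_usercopy=off" on the kernel
 * command line; the static branch flipped above makes the disabled case
 * essentially free.
 */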
278