/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
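
/*
 * Illustrative sketch only (not a definition from this header): every
 * explicit userspace access is expected to be bracketed by the pair above,
 * exactly as the copy helpers further down in this file do:
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	n = arm_copy_from_user(to, from, n);
 *	uaccess_restore(__ua_flags);
 *
 * With CONFIG_CPU_SW_DOMAIN_PAN this briefly re-opens the user domain for
 * the access and then restores the previous domain register value.
 */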

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is conceptually 0x1,0000,0000 (the full 4GiB address
 * space): KERNEL_DS is stored as zero, and the 33-bit check in __range_ok()
 * below relies on that to always succeed for KERNEL_DS.
 */
#define KERNEL_DS	0x00000000

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)
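
/*
 * Hedged usage sketch (this get_fs()/set_fs() pattern is legacy and is being
 * phased out of the kernel; it is shown only to illustrate what the helpers
 * above provide):
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);	// temporarily allow kernel addresses
 *	...access a kernel buffer through the user accessors...
 *	set_fs(old_fs);		// always restore the previous limit
 *
 * uaccess_kernel() simply reports whether the current limit is KERNEL_DS.
 */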

/*
 * We use 33-bit arithmetic here.  Success returns zero, failure returns
 * addr_limit.  We take advantage that addr_limit will be zero for KERNEL_DS,
 * so this will always return success in that case.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__(".syntax unified\n" \
		"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
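
/*
 * Rough C equivalent of the check above (illustrative only; the real test is
 * done in one flag-setting asm sequence to avoid branches):
 *
 *	u64 end = (u64)(u32)addr + (u64)size;	// 33-bit sum, cannot wrap
 *	flag = (addr_limit == 0 || end <= addr_limit) ? 0 : addr_limit;
 *
 * i.e. the access passes when addr + size neither carries past the 32-bit
 * address space nor exceeds the current addr_limit, and always passes for
 * KERNEL_DS (addr_limit == 0).
 */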

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
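
/*
 * For example (illustrative): __inttype(char) and __inttype(int) are both
 * unsigned long, while __inttype(long long) is unsigned long long.  This
 * gives __get_user_check() below a temporary (__r2) that is at least as wide
 * as the value being read.
 */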

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
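
/*
 * Illustrative use (a sketch, not a definition from this header): a routine
 * that has already passed access_ok() can still be steered past the limit
 * under speculation, so the pointer is masked before being dereferenced:
 *
 *	ptr = uaccess_mask_range_ptr(ptr, size);  // NULL if out of range
 *	... the masked pointer is then safe to hand to the low-level copy ...
 *
 * The csdb() barrier ensures the masking is honoured speculatively as well.
 */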

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32-bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Store the result into the least significant word of a 64-bit target
 * variable.  This only differs in the big-endian case, where the least
 * significant word of the 64-bit __r2 register pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
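
/*
 * Usage sketch (illustrative, not taken from this file): get_user() reads a
 * single value from userspace and returns 0 or -EFAULT, so callers check the
 * return value rather than the destination:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *
 * On error the out-of-line __get_user_* helpers zero the destination so that
 * stale kernel data is never exposed (see the comment above the helpers).
 */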

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define uaccess_kernel()	(true)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e. they don't return a value as such).
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})
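
/*
 * Illustrative sketch (not part of this header): the non-verifying form is
 * only safe once the whole range has been validated, e.g.:
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))	// may still fault -> -EFAULT
 *		return -EFAULT;
 *
 * When CONFIG_CPU_SPECTRE is enabled, __get_user() is instead defined above
 * as plain get_user(), which performs the range check itself.
 */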

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
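
/*
 * How the fixup above works (descriptive note): the __ex_table entry pairs
 * the address of the user load at label 1 with the fixup code at label 3.
 * If the load faults, the fault handler looks the faulting address up in
 * __ex_table and resumes at 3, which sets err to -EFAULT, zeroes the
 * destination register so no stale data can leak, and branches back to 2.
 */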

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
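
/*
 * Usage sketch (illustrative): put_user() writes a single value to userspace
 * and returns 0 or -EFAULT; there is no destination to scrub on failure:
 *
 *	if (put_user(val, (int __user *)arg))
 *		return -EFAULT;
 */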

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}
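
/*
 * Descriptive note (an assumption based on the mainline implementation, not
 * stated in this header): with CONFIG_UACCESS_WITH_MEMCPY the out-of-line
 * arm_copy_to_user() handles the domain switching around its memcpy-based
 * copy itself, which is why the inline wrapper above skips
 * uaccess_save_and_enable() in that configuration.
 */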

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

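/*
 * Usage sketch (illustrative): the raw_copy_*() routines above return the
 * number of bytes that could NOT be copied, and INLINE_COPY_*_USER asks the
 * generic <linux/uaccess.h> to build copy_from_user()/copy_to_user() around
 * them.  A typical caller:
 *
 *	struct req kreq;
 *
 *	if (copy_from_user(&kreq, ubuf, sizeof(kreq)))
 *		return -EFAULT;
 *
 * (struct req and ubuf are hypothetical names used only for this example.)
 */
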
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}
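
/*
 * Illustrative use: clear_user() zeroes n bytes of userspace and returns the
 * number of bytes left unwritten, so a non-zero return means -EFAULT, e.g.:
 *
 *	if (clear_user(ubuf + len, size - len))
 *		return -EFAULT;
 *
 * (ubuf, len and size are hypothetical names for this sketch.)
 */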

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */