/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/mm_types.h>
#include <linux/string.h>
#include <linux/mmap_lock.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 *
 * Magic with the 'sign' allows untagging a userspace pointer without
 * any branches while leaving kernel addresses intact.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	long sign;

	/*
	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in alternative instructions; the relocation goes
	 * wrong when it gets copied to the target place.
	 */
	asm (ALTERNATIVE("",
			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr), [sign] "=r" (sign)
	     : "m" (tlbstate_untag_mask), "[sign]" (addr));

	return addr;
}
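
/*
 * Worked example of the sign trick above (illustrative only; values
 * assume LAM tag bits living in bits 62:57):
 *
 *	user pointer:   sign = addr >> 63 = 0, so the OR leaves the
 *			per-CPU untag mask intact and the AND clears
 *			the tag bits.
 *	kernel pointer: sign = addr >> 63 = -1UL, the OR turns the
 *			mask into all ones, and the AND is a no-op.
 */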

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	long sign = addr >> 63;

	mmap_assert_locked(mm);
	addr &= (mm)->context.untag_mask | sign;

	return addr;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#else
#define untagged_addr(addr)	(addr)
#endif

#ifdef CONFIG_X86_64
/*
 * On x86-64, we may have tag bits in the user pointer. Rather than
 * mask them off, just change the rules for __access_ok().
 *
 * Make the rule be that 'ptr+size' must not overflow, and must not
 * have the high bit set. Compilers generally understand about
 * unsigned overflow and the CF bit and generate reasonable code for
 * this, although the combination seems to confuse at least clang:
 * instead of just doing an "add" followed by a test of SF and CF,
 * you'll see an unnecessary comparison.
 *
 * For the common case of small sizes that can be checked at compile
 * time, don't even bother with the addition, and just check that the
 * base pointer is ok.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return (long)ptr >= 0;
	} else {
		unsigned long sum = size + (unsigned long)ptr;
		return (long) sum >= 0 && sum >= (unsigned long)ptr;
	}
}
#define __access_ok __access_ok
#endif
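
/*
 * Worked example of the overflow rule above (hypothetical values, for
 * illustration): ptr = 0x00007f0000000000 with size = 0x1000 gives a
 * sum with the high bit clear and no wrap, so the access is allowed.
 * A kernel address like 0xffff888000000000 fails the sign test, and
 * ptr = 0xffffffffffffff00 with size = 0x200 wraps around (sum < ptr),
 * so both are rejected.
 */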

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * This should not be x86-specific. The only odd thing out here is
 * the WARN_ON_IN_IRQ(), which doesn't exist in the generic version.
 */
#define access_ok(addr, size)			\
({						\
	WARN_ON_IN_IRQ();			\
	likely(__access_ok(addr, size));	\
})

#include <asm-generic/access_ok.h>

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
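
/*
 * For illustration, __inttype() resolves to the smallest unsigned type
 * that can hold the operand, e.g. (on 64-bit):
 *
 *	__inttype(*(u8 *)p)	-> unsigned char
 *	__inttype(*(u16 *)p)	-> unsigned short
 *	__inttype(*(u32 *)p)	-> unsigned int
 *	__inttype(*(u64 *)p)	-> unsigned long
 *
 * On 32-bit, an 8-byte operand falls through to unsigned long long.
 */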

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	instrument_get_user(__val_gu);					\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
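
/*
 * Usage sketch (hypothetical caller, not part of this header): fetch a
 * single value from a user-supplied pointer in an ioctl-style handler:
 *
 *	static int fetch_arg(const unsigned int __user *uarg,
 *			     unsigned int *val)
 *	{
 *		if (get_user(*val, uarg))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * get_user() performs the address range check itself; see __get_user()
 * below for when the pointer has already been checked.
 */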

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to prevent any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	__ptr_pu = __ptr;						\
	__val_pu = __x;							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	instrument_put_user(__x, __ptr, sizeof(*(ptr)));		\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
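
/*
 * Usage sketch (hypothetical, for illustration): write a status value
 * back to user space, e.g. at the end of a syscall:
 *
 *	static int report_status(u32 __user *uptr, u32 status)
 *	{
 *		return put_user(status, uptr);
 *	}
 *
 * put_user() already returns 0 or -EFAULT, so its return value can be
 * propagated directly.
 */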

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(__x, __ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(__x, __ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(__x, __ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(__x, __ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
	instrument_put_user(__x, __ptr, size);				\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;		\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
	instrument_get_user(x);						\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %[result]\n\t"				\
		     "setz %b[result]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[result])			\
		     : [result] "=q" (__result),			\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)
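
/*
 * Canonical pattern for the unsafe accessors (sketch around a
 * hypothetical __user structure, for illustration):
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uptr->a, Efault);
 *	unsafe_get_user(b, &uptr->b, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */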

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();						\
	__ret;								\
							})
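
/*
 * Usage sketch (hypothetical, for illustration): atomically increment a
 * user-space counter, retrying while another thread races us.  The label
 * argument only needs to be a unique identifier; it is consumed inside
 * the macro:
 *
 *	u32 old, new;
 *	int ret;
 *
 *	if (get_user(old, uptr))
 *		return -EFAULT;
 *	do {
 *		new = old + 1;
 *		ret = __try_cmpxchg_user(uptr, &old, new, cmpxchg_fault);
 *	} while (ret == 1);
 *	return ret;	(0 on success, -EFAULT on a fault)
 */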

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
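
/*
 * Like the other unsafe_*() helpers, unsafe_copy_to_user() must run
 * inside a user_access_begin()/user_access_end() section (sketch, for
 * illustration):
 *
 *	if (!user_access_begin(udst, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(udst, ksrc, len, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */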

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */