/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/mm_types.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 *
 * Magic with the 'sign' allows untagging a userspace pointer without
 * any branches while leaving kernel addresses intact.
 */
static inline unsigned long __untagged_addr(unsigned long addr,
					    unsigned long mask)
{
	long sign = (long)addr >> 63;	/* user pointer ? 0 : -1 */

	addr &= mask | sign;
	return addr;
}
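/*
 * Worked example (illustrative only; the mask value is hypothetical,
 * the real one comes from current_untag_mask()). With
 * mask == 0x00ffffffffffffff:
 *
 *	tagged user pointer 0x3f00007fffffe000:
 *		sign = 0, addr &= (mask | 0)	-> 0x00007fffffe000
 *	kernel pointer 0xffff888000000000:
 *		sign = -1 (all ones), addr &= (mask | -1) == ~0UL
 *		-> address left intact
 */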

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr = __untagged_addr(__addr, current_untag_mask());		\
	(__force __typeof__(addr))__addr;				\
})

#define untagged_addr_remote(mm, addr)	({				\
	u64 __addr = (__force u64)(addr);				\
	mmap_assert_locked(mm);						\
	__addr = __untagged_addr(__addr, (mm)->context.untag_mask);	\
	(__force __typeof__(addr))__addr;				\
})

#else
#define untagged_addr(addr)	(addr)
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(__access_ok(untagged_addr(addr), size));			\
})
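/*
 * Usage sketch (illustrative; example_read() and its arguments are
 * hypothetical): validate the whole range once before using cheaper
 * unchecked accessors. A successful access_ok() only bounds the
 * pointer; the access itself can still fault and return -EFAULT.
 *
 *	static int example_read(int __user *ubuf, size_t n)
 *	{
 *		if (!access_ok(ubuf, n * sizeof(*ubuf)))
 *			return -EFAULT;
 *		// ... __get_user()/__copy_from_user() on the range ...
 *		return 0;
 *	}
 */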

#include <asm-generic/access_ok.h>

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
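/*
 * Expansion sketch: __typefits() selects '(unsigned type)0' once
 * sizeof(x) fits, so __inttype(x) resolves to the first unsigned type
 * at least as wide as x. For example:
 *
 *	__inttype((u8)0)  -> unsigned char
 *	__inttype((u16)0) -> unsigned short
 *	__inttype((u32)0) -> unsigned int
 *	__inttype((u64)0) -> unsigned long on 64-bit kernels,
 *			     unsigned long long (the 0ULL fallback) on 32-bit
 */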

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	instrument_get_user(__val_gu);					\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
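/*
 * Usage sketch (illustrative; 'uptr' is a hypothetical __user pointer
 * in a syscall body):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;	// on failure, val has been zeroed
 *	// ... use val ...
 */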

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
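/*
 * Usage sketch (illustrative; 'uarray', 'n' and 'v' are hypothetical):
 * hoist a single access_ok() over a whole range, then use the
 * unchecked variant inside the loop:
 *
 *	if (!access_ok(uarray, n * sizeof(*uarray)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(v, uarray + i))
 *			return -EFAULT;
 *		// ... consume v ...
 *	}
 */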

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx, clobbers %rbx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	__ptr_pu = __ptr;						\
	__val_pu = __x;							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     : "ebx");						\
	instrument_put_user(__x, __ptr, sizeof(*(ptr)));		\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
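/*
 * Usage sketch (illustrative; mirrors the get_user() example above,
 * 'result' and 'uptr' are hypothetical):
 *
 *	if (put_user(result, uptr))
 *		return -EFAULT;
 *	return 0;
 */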

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__typeof__(*(ptr)) __x = (x); /* eval x once */			\
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */		\
	__chk_user_ptr(__ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(__x, __ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(__x, __ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(__x, __ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(__x, __ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
	instrument_put_user(__x, __ptr, size);				\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
	instrument_get_user(x);						\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	__get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, use output GPR for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %[result]\n\t"				\
		     "setz %b[result]\n"				\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[result])			\
		     : [result] "=q" (__result),			\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you must not only do the access_ok() check
 * before using them, you must also surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
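/*
 * Pattern sketch (illustrative only; 'uptr', 'lo' and 'hi' are
 * hypothetical): batch several accesses under a single access_ok()
 * check and STAC/CLAC pair. Every exit path must call
 * user_access_end() once user_access_begin() has succeeded.
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, &uptr[0], Efault);
 *	unsafe_get_user(hi, &uptr[1], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */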

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();						\
	__ret;								\
							})
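/*
 * Usage sketch (illustrative; 'uaddr', 'cur' and 'new' are
 * hypothetical). Note the macro declares and handles the fault label
 * itself; the caller only supplies a unique label name:
 *
 *	int ret = __try_cmpxchg_user(uaddr, &cur, new, Efault);
 *
 *	if (ret < 0)		// -EFAULT: the user access faulted
 *		return ret;
 *	if (ret == 1)		// compare failed; 'cur' now holds the
 *		goto retry;	// current user-space value
 *	// ret == 0: exchange succeeded
 */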

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
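/*
 * Usage sketch (illustrative; 'udst', 'ksrc' and 'len' are
 * hypothetical). The size-descending loops above mean a 15-byte copy
 * is emitted as one u64, one u32, one u16 and one u8 store, each
 * individually recoverable via the exception table:
 *
 *	if (!user_access_begin(udst, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(udst, ksrc, len, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */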

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */