xref: /openbmc/linux/arch/sh/include/asm/uaccess.h (revision 0c6dfa75)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_SH_UACCESS_H
3 #define __ASM_SH_UACCESS_H
4 
5 #include <asm/extable.h>
6 #include <asm-generic/access_ok.h>
7 
8 /*
9  * Uh, these should become the main single-value transfer routines ...
10  * They automatically use the right size if we just have the right
11  * pointer type ...
12  *
13  * As SuperH uses the same address space for kernel and user data, we
14  * can just do these as direct assignments.
15  *
16  * Careful to not
17  * (a) re-use the arguments for side effects (sizeof is ok)
18  * (b) require any knowledge of processes at this stage
19  */
/*
 * Checked single-value transfers: the *_check variants below validate
 * the pointer with access_ok() before touching it.  Result is 0 on
 * success and -EFAULT when the address fails access_ok(); on the
 * access path itself the error comes from __{get,put}_user_size()
 * (arch helpers, presumably provided by <asm/uaccess_32.h> — not
 * visible in this file).
 */
20 #define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
21 #define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))
22 
23 /*
24  * The "__xxx" versions do not do address space checking, useful when
25  * doing multiple accesses to the same area (the user has to do the
26  * checks by hand with "access_ok()")
27  */
/*
 * Unchecked variants: no access_ok() test is performed — the caller
 * must already have validated the pointer (see comment above).
 */
28 #define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
29 #define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
30 
/*
 * __m() re-types an arbitrary user address as a pointer to a "large
 * struct", the usual kernel idiom so an asm memory operand is treated
 * as covering more than a single byte.  NOTE(review): the actual asm
 * users are not in this file — presumably in <asm/uaccess_32.h>.
 */
31 struct __large_struct { unsigned long buf[100]; };
32 #define __m(x) (*(struct __large_struct __user *)(x))
33 
/*
 * __get_user_nocheck(): fetch a value of @size bytes from user space
 * with no access_ok() check; the caller must have validated @ptr.
 * Evaluates to the error code left in __gu_err.
 *
 * The raw bits are staged in an unsigned long and cast back to the
 * pointee type, so any integer destination type works.
 * NOTE(review): __gu_err is only written by __get_user_size() (arch
 * helper, not visible here) — presumably it always sets it, as it is
 * read uninitialized otherwise.
 */
34 #define __get_user_nocheck(x,ptr,size)				\
35 ({								\
36 	long __gu_err;						\
37 	unsigned long __gu_val;					\
38 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
39 	__chk_user_ptr(ptr);					\
40 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
41 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
42 	__gu_err;						\
43 })
44 
/*
 * __get_user_check(): as __get_user_nocheck(), but the pointer is
 * validated with access_ok() first.  When the check fails the result
 * is -EFAULT and (x) is still assigned — from the pre-zeroed
 * __gu_val — so the destination is never left holding stale contents
 * on error.
 */
45 #define __get_user_check(x,ptr,size)					\
46 ({									\
47 	long __gu_err = -EFAULT;					\
48 	unsigned long __gu_val = 0;					\
49 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
50 	if (likely(access_ok(__gu_addr, (size))))		\
51 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
52 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
53 	__gu_err;							\
54 })
55 
/*
 * __put_user_nocheck(): store @x (@size bytes) to user space with no
 * access_ok() check; the caller must have validated @ptr.  @x and
 * @ptr are evaluated exactly once.  Evaluates to the error code left
 * in __pu_err by __put_user_size() (arch helper, not visible here —
 * presumably it always writes __pu_err, which is otherwise read
 * uninitialized).
 */
56 #define __put_user_nocheck(x,ptr,size)				\
57 ({								\
58 	long __pu_err;						\
59 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
60 	__typeof__(*(ptr)) __pu_val = x;			\
61 	__chk_user_ptr(ptr);					\
62 	__put_user_size(__pu_val, __pu_addr, (size), __pu_err);	\
63 	__pu_err;						\
64 })
65 
/*
 * __put_user_check(): store @x (@size bytes) to user space, validating
 * the pointer with access_ok() first.
 *
 * Evaluates to 0 on success, -EFAULT when the address fails
 * access_ok(), or the error left in __pu_err by __put_user_size()
 * (arch helper, not visible here).  @x and @ptr are evaluated exactly
 * once.  The 'size' argument is parenthesized in the access_ok() call
 * for macro-argument hygiene, matching __get_user_check().
 */
#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = x;			\
	if (likely(access_ok(__pu_addr, (size))))		\
		__put_user_size(__pu_val, __pu_addr, (size),	\
				__pu_err);			\
	__pu_err;						\
})
76 
77 # include <asm/uaccess_32.h>
78 
/*
 * String helpers — presumably implemented elsewhere in the arch code;
 * only the prototypes are visible here.
 */
79 extern long strncpy_from_user(char *dest, const char __user *src, long count);
80 
81 extern __must_check long strnlen_user(const char __user *str, long n);
82 
83 /* Generic arbitrary sized copy.  */
84 /* Return the number of bytes NOT copied */
85 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
86 
87 static __always_inline unsigned long
88 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
89 {
90 	return __copy_user(to, (__force void *)from, n);
91 }
92 
93 static __always_inline unsigned long __must_check
94 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
95 {
96 	return __copy_user((__force void *)to, from, n);
97 }
/*
 * Request inline copy_{from,to}_user() wrappers around the raw_*
 * helpers above.  NOTE(review): these flags are consumed by the
 * generic <linux/uaccess.h> machinery, not visible in this file —
 * confirm there.
 */
98 #define INLINE_COPY_FROM_USER
99 #define INLINE_COPY_TO_USER
100 
101 /*
102  * Clear the area and return remaining number of bytes
103  * (on failure.  Usually it's 0.)
104  */
105 __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);
106 
/*
 * clear_user(): zero @n bytes of user memory at @addr after an
 * access_ok() check.  Evaluates to the number of bytes NOT cleared:
 * 0 on success (and trivially for a zero-length request), @n when the
 * address fails access_ok(), otherwise whatever __clear_user()
 * reports.  @addr and @n are evaluated exactly once.
 */
107 #define clear_user(addr,n)						\
108 ({									\
109 	void __user * __cl_addr = (addr);				\
110 	unsigned long __cl_size = (n);					\
111 									\
112 	if (__cl_size && access_ok(__cl_addr, __cl_size))		\
113 		__cl_size = __clear_user(__cl_addr, __cl_size);		\
114 									\
115 	__cl_size;							\
116 })
117 
/*
 * Install @handler for exception vector @vec.  Defined elsewhere in
 * the arch code.  NOTE(review): the returned pointer is presumably
 * the previous handler — confirm at the definition.
 */
118 extern void *set_exception_table_vec(unsigned int vec, void *handler);
119 
/*
 * set_exception_table_evt(): register @handler for exception event
 * code @evt.  The event code is converted to a vector number by
 * dividing by 0x20 (evt >> 5) before delegating to
 * set_exception_table_vec().
 */
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	unsigned int vec = evt >> 5;

	return set_exception_table_vec(vec, handler);
}
124 
/*
 * Copy callbacks used by the unaligned-access fixup code: .from reads
 * through a user pointer, .to writes through one.  NOTE(review): both
 * presumably return the number of bytes not copied, matching
 * __copy_user() above — confirm at the call sites.
 */
125 struct mem_access {
126 	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
127 	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
128 };
129 
/* NOTE(review): the unnamed 'int' parameter's meaning is not visible here. */
130 int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
131 			    struct mem_access *ma, int, unsigned long address);
132 
133 #endif /* __ASM_SH_UACCESS_H */
134