xref: /openbmc/linux/arch/parisc/include/asm/uaccess.h (revision 4f139972b489f8bc2c821aa25ac65018d92af3f7)
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS 	((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user
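
/*
 * Illustrative sketch (not part of this header; function and variable names
 * are hypothetical): because access_ok() accepts any address, callers simply
 * use get_user()/put_user() and rely on the fault handler to produce -EFAULT
 * for bad user pointers.
 *
 *	static int example_read_flag(u32 __user *uptr, u32 *out)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))	// a fault resolves to -EFAULT
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */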

#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)		__get_user_asm64(ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
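
/*
 * Sketch of how a relative entry is resolved (helper names below are
 * illustrative, not necessarily those used by the parisc fixup code): the
 * absolute address is recovered by adding the stored offset to the address of
 * the field that holds it, which is why 32-bit fields are enough even for a
 * 64-bit kernel.
 *
 *	static inline unsigned long
 *	example_extable_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 *
 *	static inline unsigned long
 *	example_extable_fixup(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */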

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
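
/*
 * Minimal sketch of the idea (the real logic lives in fixup_exception(); the
 * code below is illustrative only): the "+ 1" above sets the low bit of the
 * stored fixup offset, which the fault handler can test and strip before
 * resuming at the fixup address.
 *
 *	unsigned long fixup = (unsigned long)&fix->fixup + fix->fixup;
 *
 *	if (fix->fixup & 1) {
 *		regs->gr[8] = -EFAULT;	// %r8 is seen as __gu_err/__pu_err
 *		// a read fault additionally zeroes the target register
 *	}
 *	regs->iaoq[0] = fixup & ~1;	// strip the flag bit and resume there
 */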

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};
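
/*
 * Illustrative only (the actual definition and users live in the parisc fault
 * handling code, not in this header): the structure would typically be
 * instantiated as a per-cpu variable and filled in by the fault path before
 * the fixup routine runs, along the lines of
 *
 *	DEFINE_PER_CPU(struct exception_data, exception_data);
 *
 *	d = this_cpu_ptr(&exception_data);
 *	d->fault_ip    = regs->iaoq[0];
 *	d->fault_space = fault_space;
 *	d->fault_addr  = fault_address;
 */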

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
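
/*
 * Roughly equivalent C for the asm above (illustrative only): the "or,="
 * nullifies the following mfsp when get_fs() is 0, so the scratch register
 * keeps the value 0 and %sr2 is loaded with 0 (kernel space); otherwise %sr2
 * is loaded with the current user space id held in %sr3.
 *
 *	unsigned long sr2 = get_fs().seg ? mfsp(3) : 0;	// mfsp() hypothetical
 *	mtsp(sr2, 2);					// as is mtsp()
 */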

#define __get_user(x, ptr)                               \
({                                                       \
	register long __gu_err __asm__ ("r8") = 0;       \
	register long __gu_val;				 \
							 \
	load_sr2();					 \
	switch (sizeof(*(ptr))) {			 \
	    case 1: __get_user_asm("ldb", ptr); break;   \
	    case 2: __get_user_asm("ldh", ptr); break;   \
	    case 4: __get_user_asm("ldw", ptr); break;   \
	    case 8: LDD_USER(ptr);  break;		 \
	    default: BUILD_BUG(); break;		 \
	}                                                \
							 \
	(x) = (__force __typeof__(*(ptr))) __gu_val;	 \
	__gu_err;                                        \
})

#define __get_user_asm(ldx, ptr)                        \
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err));

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr) 				\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));

#endif /* !defined(CONFIG_64BIT) */


#define __put_user(x, ptr)                                      \
({								\
	register long __pu_err __asm__ ("r8") = 0;      	\
        __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	    case 1: __put_user_asm("stb", __x, ptr); break;     \
	    case 2: __put_user_asm("sth", __x, ptr); break;     \
	    case 4: __put_user_asm("stw", __x, ptr); break;     \
	    case 8: STD_USER(__x, ptr); break;			\
	    default: BUILD_BUG(); break;			\
	}                                                       \
								\
	__pu_err;						\
})

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)                         \
	__asm__ __volatile__ (                              \
		"1: " stx " %2,0(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {	    	    \
	__asm__ __volatile__ (				    \
		"1: stw %2,0(%%sr2,%1)\n"		    \
		"2: stw %R2,4(%%sr2,%1)\n"		    \
		"9:\n"					    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
					  unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
					  unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
			   unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		ret = __copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);

	return ret;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = __copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
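
/*
 * Typical usage (illustrative sketch, hypothetical names): the return value
 * is the number of bytes that could not be copied, and copy_from_user()
 * zeroes the uncopied tail of the kernel buffer so callers never see stale
 * data on a partial copy.
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, uargs, sizeof(args)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uresult, &result, sizeof(result)))
 *		return -EFAULT;
 */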

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */