xref: /openbmc/linux/arch/parisc/include/asm/uaccess.h (revision 232b0b08)
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS 	((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
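
/*
 * Usage sketch (illustrative, not part of this header): the classic
 * pattern for letting the uaccess helpers below operate on kernel
 * pointers is to temporarily widen the address limit:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_kernel_access();	// hypothetical helper that ends
 *	set_fs(old_fs);			// up in __get_user()/__put_user()
 */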

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user
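
/*
 * Usage sketch (illustrative): because access_ok() always succeeds on
 * parisc, get_user()/put_user() map directly onto their double-
 * underscore forms and the fault handler provides all the protection:
 *
 *	int __user *uptr;	// hypothetical user pointer
 *	int val;
 *
 *	if (get_user(val, uptr))	// non-zero (-EFAULT) on a fault
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */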

#if !defined(CONFIG_64BIT)
#define LDD_USER(ptr)		__get_user_asm64(ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif
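
/*
 * On 32-bit kernels an 8-byte access cannot use a single instruction,
 * so it is split into two 4-byte ldw/stw operations (see
 * __get_user_asm64()/__put_user_asm64() below); 64-bit kernels use a
 * single ldd/std instead.
 */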

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
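
/*
 * Decoding sketch (assumption: this mirrors the generic relative-
 * extable handling; the real lookup lives in the fault path, not in
 * this header).  An absolute address is recovered by adding the stored
 * offset to the address of the entry field itself:
 *
 *	static inline unsigned long
 *	ex_to_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 *
 *	static inline unsigned long
 *	ex_to_fixup(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */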

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_gp;
	unsigned long fault_space;
	unsigned long fault_addr;
};
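
/*
 * Note (added for clarity): fault_ip/fault_space/fault_addr describe
 * the faulting access and fault_gp snapshots the global pointer; the
 * fixup routines named in the exception table entries of this file are
 * presumed to consult this per-cpu data when deciding where to resume.
 */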

/*
 * load_sr2() preloads the space register %sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS,
 * which is 0), or with the current value of %sr3 to access user space
 * (USER_DS) memory. The __get_user_asm() and __put_user_asm() macros
 * below have %sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
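
/*
 * Walkthrough of the sequence above (comments added for clarity):
 *
 *	or,=  %0,%r0,%r0	- compare-and-nullify: if get_fs() is 0
 *				  (KERNEL_DS), nullify the next insn
 *	mfsp  %sr3,%0		- USER_DS only: fetch the user space id
 *	mtsp  %0,%sr2		- %sr2 := 0 (kernel) or %sr3 (user)
 */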

#define __get_user(x, ptr)                               \
({                                                       \
	register long __gu_err __asm__ ("r8") = 0;       \
	register long __gu_val __asm__ ("r9") = 0;       \
							 \
	load_sr2();					 \
	switch (sizeof(*(ptr))) {			 \
	    case 1: __get_user_asm("ldb", ptr); break;   \
	    case 2: __get_user_asm("ldh", ptr); break;   \
	    case 4: __get_user_asm("ldw", ptr); break;   \
	    case 8: LDD_USER(ptr);  break;		 \
	    default: BUILD_BUG(); break;		 \
	}                                                \
							 \
	(x) = (__force __typeof__(*(ptr))) __gu_val;	 \
	__gu_err;                                        \
})
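
/*
 * Expansion sketch (illustrative; rN stands for whatever register gcc
 * picks for ptr): __get_user(v, (u32 __user *)p) emits roughly
 *
 *	1: ldw 0(%sr2,rN),r9	// value lands in __gu_val (r9)
 *
 * plus an __ex_table entry mapping label 1 to fixup_get_user_skip_1,
 * which on a fault skips past the load and flags the error in r8
 * (__gu_err).  r8/r9 are dictated by the fixup routines' calling
 * convention, hence the explicit register variables above.
 */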

#define __get_user_asm(ldx, ptr)                        \
	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"	\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(ptr)				\
	__asm__("\n1:\tldw 0(%%sr2,%2),%0"		\
		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");

#endif /* !defined(CONFIG_64BIT) */
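
/*
 * Note (added for clarity, assuming the usual gcc/PA conventions): on
 * this 32-bit big-endian configuration a 64-bit value occupies a
 * register pair; "%0" names the register holding the most significant
 * word and "%R0" its pair holding the least significant word, matching
 * the two ldw's at offsets 0 and 4.  The two exception entries skip
 * two or one remaining instruction(s), depending on which load faults.
 */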


#define __put_user(x, ptr)                                      \
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	load_sr2();						\
	switch (sizeof(*(ptr))) {				\
	    case 1: __put_user_asm("stb", __x, ptr); break;     \
	    case 2: __put_user_asm("sth", __x, ptr); break;     \
	    case 4: __put_user_asm("stw", __x, ptr); break;     \
	    case 8: STD_USER(__x, ptr); break;			\
	    default: BUILD_BUG(); break;			\
	}                                                       \
								\
	__pu_err;						\
})
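
/*
 * Note (added for clarity): __x forces (x) through __typeof__(*(ptr)),
 * so the value is evaluated exactly once and is truncated or extended
 * to the width of the store before __put_user_asm() sees it.
 */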

/*
 * The __put_user_asm() macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that "fixup_put_user_skip_[12]" are executed in the
 * context of the fault, and any registers used there must be listed
 * as clobbers. In this case only "r1" is used by the current routines.
 * r8/r9 are already listed as err/val.
 */

#define __put_user_asm(stx, x, ptr)                         \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"	    \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
		: "r1")


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%%sr2,%1)"		    \
		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err)	    \
		: "r1");				    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
					  unsigned long len);
unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src,
			   unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = n;

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		ret = __copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	if (unlikely(ret))
		memset(to + (n - ret), 0, ret);

	return ret;
}
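
/*
 * Caller-side sketch (illustrative): the return value is the number of
 * bytes that could not be copied, and on a short copy the unread tail
 * of the kernel buffer has already been zeroed by the memset() above,
 * so stale kernel data cannot leak to the caller:
 *
 *	struct karg a;	// hypothetical argument struct
 *
 *	if (copy_from_user(&a, uarg, sizeof(a)))
 *		return -EFAULT;
 */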

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = __copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
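
/*
 * Note (added for clarity): the sz checks above implement usercopy
 * hardening - a provably too-small object with a constant size fails
 * the build via __bad_copy_user(), a non-constant overflow warns at
 * runtime via copy_user_overflow(), and check_object_size() performs
 * the CONFIG_HARDENED_USERCOPY runtime checks on copies that proceed.
 */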

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */