xref: /openbmc/linux/arch/alpha/include/asm/uaccess.h (revision 612a462a)
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })
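
/*
 * For reference (a sketch of the arithmetic, not used by the code):
 * KERNEL_DS is the all-zero mask, so no address bits are off limits,
 * while USER_DS is -0x40000000000UL == 0xfffffc0000000000, i.e. bits
 * 42..63.  A user address never has any of those bits set; a kernel
 * address always does, so it trips the mask test in __access_ok() below.
 */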

#define get_fs()  (current_thread_info()->addr_limit)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size) \
	((get_fs().seg & (addr | size | (addr+size))) == 0)

#define access_ok(type, addr, size)			\
({							\
	__chk_user_ptr(addr);				\
	__access_ok(((unsigned long)(addr)), (size));	\
})
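
/*
 * A worked example (illustrative only, with the USER_DS mask in force):
 * a 4-byte access at user address 0x120000000 passes, because
 *
 *	0xfffffc0000000000 & (0x120000000 | 4 | 0x120000004) == 0
 *
 * while a kernel address such as 0xfffffc0000001000, or a size large
 * enough to push addr+size into the high bits, makes the AND non-zero
 * and the check fail.  With KERNEL_DS the mask is 0 and everything
 * passes.
 */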

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
  __get_user_check((x), (ptr), sizeof(*(ptr)))
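
/*
 * Illustrative use (a sketch of a hypothetical caller, not part of this
 * header; "arg" is assumed to be an int __user * handed in from user
 * space):
 *
 *	int val;
 *
 *	if (get_user(val, arg))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, arg))
 *		return -EFAULT;
 *
 * Both macros evaluate to 0 on success and -EFAULT on a bad address.
 */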

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
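
/*
 * A sketch of the intended pattern (hypothetical caller; "uaddr" is
 * assumed to be an int __user *, and VERIFY_READ comes from
 * <linux/uaccess.h>): one access_ok() check covers several unchecked
 * accesses.
 *
 *	int a, b, err;
 *
 *	if (!access_ok(VERIFY_READ, uaddr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	err = __get_user(a, uaddr);
 *	err |= __get_user(b, uaddr + 1);
 *	if (err)
 *		return -EFAULT;
 */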

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"
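
/*
 * For reference, an expansion sketch: EXC(1b,2b,%0,%1) emits
 *
 *	.section __ex_table,"a"
 *		.long 1b-.
 *		lda %0,2b-1b(%1)
 *	.previous
 *
 * i.e. an exception-table entry holding the offset of the faulting
 * instruction, plus an lda whose fields encode the register to zero
 * (%0), the register to receive the error code (%1), and the
 * displacement to the continuation label.
 */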

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err = 0;					\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	  case 1: __get_user_8(ptr); break;			\
	  case 2: __get_user_16(ptr); break;			\
	  case 4: __get_user_32(ptr); break;			\
	  case 8: __get_user_64(ptr); break;			\
	  default: __get_user_unknown(); break;			\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		  case 1: __get_user_8(__gu_addr); break;	\
		  case 2: __get_user_16(__gu_addr); break;	\
		  case 4: __get_user_32(__gu_addr); break;	\
		  case 8: __get_user_64(__gu_addr); break;	\
		  default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
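
/*
 * A note on __m() (descriptive only): casting the user pointer to a
 * struct spanning many words lets the "m" constraints below tell gcc
 * that the asm may touch memory around that address, rather than a
 * single scalar object, so the compiler keeps its view of memory
 * consistent with what the asm actually accesses.
 */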

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
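/* (For orientation: the 16-bit sequence below loads the two aligned
   quadwords that can contain the halfword, extracts the low and high
   parts with extwl/extwh based on the pointer's low bits, and ORs them
   together, so it works for any alignment.)  */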

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2:	ldq_u %1,1(%3)\n"					\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	EXC(1b,3b,%0,%2)						\
	EXC(2b,3b,%0,%2)						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)						\
	__asm__("1: ldq_u %0,0(%2)\n"					\
	"	extbl %0,%2,%0\n"					\
	"2:\n"								\
	EXC(1b,2b,%0,%1)						\
		: "=&r"(__gu_val), "=r"(__gu_err)			\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	  case 1: __put_user_8(x, ptr); break;			\
	  case 2: __put_user_16(x, ptr); break;			\
	  case 4: __put_user_32(x, ptr); break;			\
	  case 8: __put_user_64(x, ptr); break;			\
	  default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (__access_ok((unsigned long)__pu_addr, size)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		  case 1: __put_user_8(x, __pu_addr); break;	\
		  case 2: __put_user_16(x, __pu_addr); break;	\
		  case 4: __put_user_32(x, __pu_addr); break;	\
		  case 8: __put_user_64(x, __pu_addr); break;	\
		  default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
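
/*
 * (A reading of the constraints, for reference: "rJ" lets a constant
 * zero be passed in place of a register, in which case "%r2" prints
 * $31; the $31 in the EXC() entries means there is no value register
 * to zero when a store faults, only the error register to set.)
 */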
#define __put_user_64(x, addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */
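/* (For orientation: the 16-bit sequence below reads the aligned
   quadwords covering the target, builds the new bytes with inswl/inswh,
   clears the old ones with mskwl/mskwh, merges the halves and stores
   the quadwords back; in other words, a read-modify-write of the
   surrounding memory.)  */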

#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	EXC(1b,5b,$31,%0)					\
	EXC(2b,5b,$31,%0)					\
	EXC(3b,5b,$31,%0)					\
	EXC(4b,5b,$31,%0)					\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	EXC(1b,3b,$31,%0)					\
	EXC(2b,3b,$31,%0)					\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	return __copy_user((__force void *)to, from, len);
}
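
/*
 * Illustrative only: these are the raw building blocks consumed by the
 * generic copy_from_user()/copy_to_user() in <linux/uaccess.h>, which
 * add the access_ok() check and return the number of bytes NOT copied.
 * A hypothetical caller ("struct foo" and "uarg" are made-up names):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */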

extern long __clear_user(void __user *to, long len);

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len))
		len = __clear_user(to, len);
	return len;
}
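
/*
 * A usage sketch (hypothetical "ubuf"/"size"): clear_user() zeroes len
 * bytes of user memory and returns the number of bytes that could not
 * be cleared, so 0 means success.
 *
 *	if (clear_user(ubuf, size))
 *		return -EFAULT;
 */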

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */