/* xref: /openbmc/linux/arch/parisc/include/asm/uaccess.h (revision 161f4089) */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm-generic/uaccess-unaligned.h>

#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS 	((mm_segment_t){1})

#define segment_eq(a,b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

extern int __get_kernel_bad(void);
extern int __get_user_bad(void);
extern int __put_kernel_bad(void);
extern int __put_user_bad(void);


/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline int __range_not_ok(unsigned long addr, unsigned long size,
				 unsigned long limit)
{
	unsigned long __newaddr = addr + size;
	return (__newaddr < addr || __newaddr > limit || size > limit);
}
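
/*
 * Worked example (illustrative only, not part of the original header):
 * with addr = (unsigned long)-4 and size = 8, addr + size wraps around to
 * 4, so __newaddr < addr and the range is rejected even though addr and
 * size each look plausible on their own.
 */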

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
(	__chk_user_ptr(addr),						\
	!__range_not_ok((unsigned long) (__force void *) (addr),	\
			size, user_addr_max())				\
)
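
/*
 * Illustrative use (a sketch, not part of the original header): a typical
 * caller validates the whole user buffer once, then uses the
 * double-underscore accessors declared further down, e.g.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *
 * where ubuf, kbuf and len are the caller's own variables.
 */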

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_KERNEL(ptr)		__get_kernel_bad();
#define LDD_USER(ptr)		__get_user_bad();
#define STD_KERNEL(x, ptr)	__put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x,ptr)
#define ASM_WORD_INSN		".word\t"
#else
#define LDD_KERNEL(ptr)		__get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr)		__get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr)	__put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr)	__put_user_asm("std",x,ptr)
#define ASM_WORD_INSN		".dword\t"
#endif

/*
 * The exception table contains two values: the first is an address
 * for an instruction that is allowed to fault, and the second is
 * the address of the fixup routine. Even on a 64bit kernel we could
 * use a 32bit (unsigned int) address here.
 */

struct exception_table_entry {
	unsigned long insn;	/* address of insn that is allowed to fault. */
	unsigned long fixup;	/* fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
	".previous\n"

/*
 * The page fault handler stores, in a per-cpu area, the following information
 * if a fixup routine is available.
 */
struct exception_data {
	unsigned long fault_ip;
	unsigned long fault_space;
	unsigned long fault_addr;
};
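
/*
 * __get_user()/__put_user() below dispatch on the current address limit:
 * with KERNEL_DS the access is a plain kernel-space load/store, with
 * USER_DS it goes through the %sr3 space register.  The error code lives
 * in r8 and the fetched value in r9 (see the register declarations below
 * and the clobber note before __put_kernel_asm()).
 */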

#define __get_user(x,ptr)                               \
({                                                      \
	register long __gu_err __asm__ ("r8") = 0;      \
	register long __gu_val __asm__ ("r9") = 0;      \
							\
	if (segment_eq(get_fs(),KERNEL_DS)) {           \
	    switch (sizeof(*(ptr))) {                   \
	    case 1: __get_kernel_asm("ldb",ptr); break; \
	    case 2: __get_kernel_asm("ldh",ptr); break; \
	    case 4: __get_kernel_asm("ldw",ptr); break; \
	    case 8: LDD_KERNEL(ptr); break;		\
	    default: __get_kernel_bad(); break;         \
	    }                                           \
	}                                               \
	else {                                          \
	    switch (sizeof(*(ptr))) {                   \
	    case 1: __get_user_asm("ldb",ptr); break;   \
	    case 2: __get_user_asm("ldh",ptr); break;   \
	    case 4: __get_user_asm("ldw",ptr); break;   \
	    case 8: LDD_USER(ptr);  break;		\
	    default: __get_user_bad(); break;           \
	    }                                           \
	}                                               \
							\
	(x) = (__typeof__(*(ptr))) __gu_val;            \
	__gu_err;                                       \
})
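
/*
 * Illustrative use (a sketch, not part of the original header); ptr and
 * val stand for the caller's own variables:
 *
 *	int __user *ptr;
 *	int val;
 *
 *	if (get_user(val, ptr))		-- expands to __get_user() on parisc
 *		return -EFAULT;
 */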

#define __get_kernel_asm(ldx,ptr)                       \
	__asm__("\n1:\t" ldx "\t0(%2),%0\n\t"		\
		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");

#define __get_user_asm(ldx,ptr)                         \
	__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"	\
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
		: "=r"(__gu_val), "=r"(__gu_err)        \
		: "r"(ptr), "1"(__gu_err)		\
		: "r1");

#define __put_user(x,ptr)                                       \
({								\
	register long __pu_err __asm__ ("r8") = 0;      	\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	if (segment_eq(get_fs(),KERNEL_DS)) {                   \
	    switch (sizeof(*(ptr))) {                           \
	    case 1: __put_kernel_asm("stb",__x,ptr); break;     \
	    case 2: __put_kernel_asm("sth",__x,ptr); break;     \
	    case 4: __put_kernel_asm("stw",__x,ptr); break;     \
	    case 8: STD_KERNEL(__x,ptr); break;			\
	    default: __put_kernel_bad(); break;			\
	    }                                                   \
	}                                                       \
	else {                                                  \
	    switch (sizeof(*(ptr))) {                           \
	    case 1: __put_user_asm("stb",__x,ptr); break;       \
	    case 2: __put_user_asm("sth",__x,ptr); break;       \
	    case 4: __put_user_asm("stw",__x,ptr); break;       \
	    case 8: STD_USER(__x,ptr); break;			\
	    default: __put_user_bad(); break;			\
	    }                                                   \
	}                                                       \
								\
	__pu_err;						\
})
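
/*
 * Illustrative use (a sketch, not part of the original header); the
 * counterpart of the get_user() example above:
 *
 *	if (put_user(val, ptr))
 *		return -EFAULT;
 */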

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. Anyone changing these
 * macros must also be aware that "fixup_put_user_skip_[12]" are executed
 * in the context of the fault, and any registers used there must be
 * listed as clobbers. In this case only "r1" is used by the current
 * routines. r8/r9 are already listed as err/val.
 */

#define __put_kernel_asm(stx,x,ptr)                         \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
		: "r1")

#define __put_user_asm(stx,x,ptr)                           \
	__asm__ __volatile__ (                              \
		"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"	    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
		: "r1")


#if !defined(CONFIG_64BIT)

#define __put_kernel_asm64(__val,ptr) do {		    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%1)"			    \
		"\n2:\tstw %R2,4(%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err) \
		: "r1");				    \
} while (0)

#define __put_user_asm64(__val,ptr) do {		    \
	__asm__ __volatile__ (				    \
		"\n1:\tstw %2,0(%%sr3,%1)"		    \
		"\n2:\tstw %R2,4(%%sr3,%1)\n\t"		    \
		ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
		ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
		: "=r"(__pu_err)                            \
		: "r"(ptr), "r"(__val), "0"(__pu_err) \
		: "r1");				    \
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *,unsigned long);
extern long lstrnlen_user(const char __user *,long);
/*
 * Complex access routines -- macros
 */
#ifdef CONFIG_COMPAT
#define user_addr_max() (TASK_SIZE)
#else
#define user_addr_max() (DEFAULT_TASK_SIZE)
#endif

#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
#define __copy_to_user copy_to_user
unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
        __compiletime_error("copy_from_user() buffer size is not provably correct")
#else
        __compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);
        int ret = -EFAULT;

        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
                ret = __copy_from_user(to, from, n);
        else
                copy_from_user_overflow();

        return ret;
}
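
/*
 * Illustrative use (a sketch, not part of the original header): with a
 * destination whose size the compiler can see and a constant length, the
 * __compiletime_object_size() check above can flag an oversized copy at
 * build time:
 *
 *	char buf[16];
 *
 *	if (copy_from_user(buf, ubuf, sizeof(buf)))
 *		return -EFAULT;
 *
 * where ubuf is the caller's user pointer.
 */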

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */