#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap between
 * user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);
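
/*
 * Example usage (an illustrative sketch, not part of this interface): a
 * hypothetical ioctl handler might fetch a word from userspace, modify it
 * and write it back with the checking variants.  "uarg" and FLAG_FOO are
 * made-up names for the example:
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)uarg))
 *		return -EFAULT;
 *	val |= FLAG_FOO;
 *	if (put_user(val, (u32 __user *)uarg))
 *		return -EFAULT;
 *
 * Both macros return 0 on success and -EFAULT on a faulting access, and
 * perform the access_ok() check themselves.
 */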

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)
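
/*
 * Example of the unchecked variants (an illustrative sketch only): when
 * several fields are read from the same user buffer, access_ok() can be
 * done once up front and the cheaper __get_user() used for each field.
 * "uptr" (a u32 __user pointer) and the layout below are invented for
 * the example:
 *
 *	u32 a, b;
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(a, &uptr[0]) || __get_user(b, &uptr[1]))
 *		return -EFAULT;
 */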

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return __copy_tofrom_user(to, from, n);
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long raw_copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	return size;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif	/* _ARCH_POWERPC_UACCESS_H */
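
/*
 * Note on the copy routines' return convention (an illustrative sketch;
 * "ubuf" and "struct foo_args" are invented for the example).  The raw
 * copy helpers above return the number of bytes that could NOT be
 * transferred, so 0 means success.  Callers normally reach them through
 * the generic copy_from_user()/copy_to_user() wrappers in
 * <linux/uaccess.h>, which add the access_ok() check:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, ubuf, sizeof(args)))
 *		return -EFAULT;
 *
 * Similarly, clear_user() returns the number of bytes left unzeroed, so
 * a nonzero result is typically mapped to -EFAULT by the caller.
 */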