/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel.  This header file provides functions such as
 * get_user(), put_user(), copy_to_user(), copy_from_user(), clear_user(),
 * strncpy_from_user(), and strnlen_user().
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/types.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
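/*
 * Illustrative sketch only (not part of the original header): a typical
 * caller of the checking variants above.  The function and variable
 * names are hypothetical.  get_user()/put_user() return 0 on success
 * and -EFAULT on failure, with the fetched value delivered through the
 * first argument.
 */
#if 0
static int example_bump_user_counter(int *uptr)
{
	int val;

	if (get_user(val, uptr))	/* validates uptr and loads *uptr */
		return -EFAULT;
	val++;
	return put_user(val, uptr);	/* 0 on success, -EFAULT on failure */
}
#endif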
/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))


extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break; \
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
	case 8: {							\
		     __typeof__(*ptr) __v64 = x;			\
		     retval = __copy_to_user(ptr, &__v64, 8);		\
		     break;						\
	        }							\
	default: __put_user_bad();					\
	}								\
} while (0)


/*
 * Consider the case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 *	User code passes a bad variable ptr to a system call.
 *	Kernel tries to access the variable.
 *	Unaligned exception occurs.
 *	Unaligned exception handler tries to make aligned accesses.
 *	Double exception occurs for MMU-related cause (e.g., page not mapped).
 *	do_page_fault() thinks the fault address belongs to the kernel, not the
 *	user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */

#define __check_align_1	""

#define __check_align_2				\
	"   _bbci.l %3,  0, 1f		\n"	\
	"   movi    %0, %4		\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %3,  0, 0f		\n"	\
	"   _bbci.l %3,  1, 1f		\n"	\
	"0: movi    %0, %4		\n"	\
	"   _j      2f			\n"
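/*
 * Illustrative sketch only (hypothetical code, not part of the original
 * header): because of the __check_align_* sequences above, a misaligned
 * user pointer makes put_user()/get_user() and their __ variants fail
 * cleanly with -EFAULT instead of raising an unaligned-access exception
 * inside the kernel.
 */
#if 0
static int example_store_misaligned(unsigned long uaddr)
{
	/* uaddr | 1 is never 4-byte aligned, so this returns -EFAULT. */
	return put_user(0x12345678, (int *)(uaddr | 1));
}
#endif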
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long  1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err), "=r" (cb)				\
	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break; \
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break; \
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break; \
	case 8: retval = __copy_from_user(&x, ptr, 8); break;		\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn, cb)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %2, 0		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long  1b, 5b		\n"		\
	"   .previous"					\
	:"=r" (err), "=r" (cb), "=r" (x)		\
	:"r" (addr), "i" (-EFAULT), "0" (err))
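/*
 * Illustrative sketch only (hypothetical code, not part of the original
 * header): the unchecked __get_user()/__put_user() variants are meant for
 * callers that have already validated the whole region with access_ok(),
 * for example before walking an array of user-space integers.
 */
#if 0
static int example_sum_user_array(const int *uarray, unsigned long count,
				  int *sum)
{
	unsigned long i;
	int tmp;

	if (!access_ok(VERIFY_READ, uarray, count * sizeof(*uarray)))
		return -EFAULT;
	*sum = 0;
	for (i = 0; i < count; i++) {
		/* The range was checked once above; no per-access re-check. */
		if (__get_user(tmp, uarray + i))
			return -EFAULT;
		*sum += tmp;
	}
	return 0;
}
#endif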
/*
 * Copy to/from user space
 */

/*
 * We use a generic, arbitrary-sized copy subroutine.  The Xtensa
 * architecture would cause heavy code bloat if we tried to inline
 * these functions and provide __constant_copy_* equivalents like the
 * i386 versions.  __xtensa_copy_user is quite efficient.  See the
 * .fixup section of __xtensa_copy_user for a discussion on the
 * X_zeroing equivalents for Xtensa.
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)


static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to, from, n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to, from, n);
}

static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	prefetch(from);
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to, from, n);
	return n;
}

static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	prefetchw(to);
	if (access_ok(VERIFY_READ, from, n))
		return __copy_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

#define copy_to_user(to, from, n)	__generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n)	__generic_copy_from_user((to), (from), (n))
#define __copy_to_user(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user


/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, we return the full size to indicate that no memory
 * was cleared.  Otherwise, we return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!memset(addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user __xtensa_clear_user


extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}


#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}


struct exception_table_entry
{
	unsigned long insn, fixup;
};

#endif	/* _XTENSA_UACCESS_H */
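/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * copy_from_user() returns the number of bytes that could NOT be copied,
 * so callers usually convert any nonzero return value into -EFAULT, e.g.:
 *
 *	static int example_fetch_request(struct example_req *kreq,
 *					 const void *ubuf)
 *	{
 *		if (copy_from_user(kreq, ubuf, sizeof(*kreq)))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * copy_to_user() and clear_user() likewise return nonzero on failure and
 * zero on complete success.
 */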