/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel.  This header provides functions such as
 * get_user(), put_user(), clear_user(), and the raw user-copy helpers.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/prefetch.h>
#include <asm/types.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not.  If get_fs() == USER_DS, checking is
 * performed; if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0 })
#define USER_DS		((mm_segment_t) { 1 })

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.current_ds)
#define set_fs(val)	(current->thread.current_ds = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __kernel_ok (uaccess_kernel())
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
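/*
 * Usage sketch (illustrative only, not part of this header): both
 * macros evaluate to 0 on success and -EFAULT on failure; the names
 * "uptr" and "val" below are hypothetical.
 *
 *	int __user *uptr;		// pointer received from user space
 *	int val;
 *
 *	if (get_user(val, uptr))	// fetch *uptr into val
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))	// store val back to *uptr
 *		return -EFAULT;
 */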

/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x, ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))


extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb);  break;	\
	case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break;	\
	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
	case 8: {							\
			__typeof__(*ptr) __v64 = x;			\
			retval = __copy_to_user(ptr, &__v64, 8);	\
			break;						\
		}							\
	default: __put_user_bad();					\
	}								\
} while (0)


/*
 * Consider the case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses.  We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel.  Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */

#define __check_align_1		""

#define __check_align_2				\
	"   _bbci.l %3,  0, 1f		\n"	\
	"   movi    %0, %4		\n"	\
	"   _j      2f			\n"

#define __check_align_4				\
	"   _bbsi.l %3,  0, 0f		\n"	\
	"   _bbci.l %3,  1, 1f		\n"	\
	"0: movi    %0, %4		\n"	\
	"   _j      2f			\n"
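/*
 * Illustrative walk-through (commentary only; the operand numbers are
 * those of __put_user_asm/__get_user_asm below, where %0 is the error
 * code, %3 is the user address, and %4 is -EFAULT).  For a 16-bit
 * access, __check_align_2 expands roughly to:
 *
 *	_bbci.l	%3, 0, 1f	# bit 0 clear -> aligned, branch to access
 *	movi	%0, %4		# misaligned: err = -EFAULT
 *	_j	2f		# jump past the access at label 1
 *
 * A misaligned user pointer therefore fails fast with -EFAULT instead
 * of taking the unaligned-exception path described above.
 */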

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __put_user_asm(x, addr, err, align, insn, cb)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long  1b, 5b		\n"		\
	"   .previous"					\
	: "=r" (err), "=r" (cb)				\
	: "r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	int __cb;							\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
	case 8: retval = __copy_from_user(&x, ptr, 8); break;		\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn, cb)	\
__asm__ __volatile__(					\
	__check_align_##align				\
	"1: "insn"  %2, %3, 0		\n"		\
	"2:				\n"		\
	"   .section  .fixup,\"ax\"	\n"		\
	"   .align 4			\n"		\
	"4:				\n"		\
	"   .long  2b			\n"		\
	"5:				\n"		\
	"   l32r   %1, 4b		\n"		\
	"   movi   %2, 0		\n"		\
	"   movi   %0, %4		\n"		\
	"   jx     %1			\n"		\
	"   .previous			\n"		\
	"   .section  __ex_table,\"a\"	\n"		\
	"   .long  1b, 5b		\n"		\
	"   .previous"					\
	: "=r" (err), "=r" (cb), "=r" (x)		\
	: "r" (addr), "i" (-EFAULT), "0" (err))


/*
 * Copy to/from user space
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	return __xtensa_copy_user(to, (__force const void *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	return __xtensa_copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
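/*
 * Usage sketch (illustrative only): like __xtensa_copy_user(), the
 * raw_copy_*() helpers return the number of bytes that could NOT be
 * copied, so the generic copy_from_user()/copy_to_user() wrappers
 * built on them are typically used as:
 *
 *	struct foo karg;		// hypothetical kernel-side copy
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;		// some bytes faulted
 */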

/*
 * We need to return the number of bytes not cleared.  Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, report that no memory was cleared by returning the
 * full size.  Otherwise, return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
	if (!memset(addr, 0, size))
		return size;
	return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		return __xtensa_clear_user(addr, size);
	return size ? -EFAULT : 0;
}

#define __clear_user __xtensa_clear_user


extern long __strncpy_user(char *, const char *, long);

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_user(dst, src, count);
	return -EFAULT;
}

/*
 * Return the size of a string (including the terminating 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len);
}

#endif	/* _XTENSA_UACCESS_H */