#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions.  These should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef segment_eq
#define segment_eq(a, b)	((a).seg == (b).seg)
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * checking the address range against get_fs().
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if the exception is not found, and the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);

/*
 * Architectures with an MMU should override these two.
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif
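/*
 * Illustrative sketch, not part of this header's API: on a NOMMU target
 * the raw helpers above reduce to plain loads/stores or a memcpy(), so
 * fetching a small fixed-size value from user space is essentially free.
 * The function and variable names below are hypothetical.
 *
 *	static long example_fetch_flags(const u32 __user *uptr, u32 *flags)
 *	{
 *		if (__copy_from_user(flags, uptr, sizeof(*flags)))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * Callers of the double-underscore helpers must call access_ok()
 * themselves; the checked copy_{from,to}_user() wrappers below do the
 * check on the caller's behalf.
 */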
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast path for small values.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void *__p = (ptr);					\
	might_fault();						\
	access_ok(VERIFY_WRITE, __p, sizeof(*(ptr))) ?		\
		__put_user((x), ((__typeof__(*(ptr)) *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void *__p = (ptr);				\
	might_fault();						\
	access_ok(VERIFY_READ, __p, sizeof(*(ptr))) ?		\
		__get_user((x), (__typeof__(*(ptr)) *)__p) :	\
		-EFAULT;					\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : size;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user(to, from, n);
	else
		return n;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}
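/*
 * Illustrative sketch, not part of this header's API: a typical caller
 * of the checked routines above.  copy_to_user() returns the number of
 * bytes that could NOT be copied, so zero means success.  The structure
 * and function names here are hypothetical.
 *
 *	struct example_info {
 *		u32 version;
 *		u32 flags;
 *	};
 *
 *	static long example_get_info(void __user *arg)
 *	{
 *		struct example_info info = { .version = 1, .flags = 0 };
 *
 *		if (copy_to_user(arg, &info, sizeof(info)))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * For single scalars, get_user()/put_user() are the usual shorthand and
 * fold the access_ok() check in:
 *
 *	u32 val;
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 */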
/*
 * Copy a null-terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the terminating NUL).
 *
 * Returns 0 on exception, or a value greater than n if the string
 * is too long.
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n)	(strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen(), strnlen_user() includes the NUL terminator in its
 * returned count.  Callers should check for a returned value greater
 * than n as an indication that the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}

/*
 * Zero userspace.
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}

#endif /* __ASM_GENERIC_UACCESS_H */
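/*
 * Illustrative sketch, not part of this header's API: pulling a bounded,
 * user-supplied string into a kernel buffer with the string helpers
 * above.  The generic __strncpy_from_user() does not guarantee NUL
 * termination when the source fills the whole buffer, so a defensive
 * caller terminates it explicitly.  The function and buffer names below
 * are hypothetical.
 *
 *	static long example_set_name(const char __user *uname)
 *	{
 *		char name[32];
 *		long len;
 *
 *		len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *		if (len < 0)
 *			return len;
 *		name[len] = '\0';
 *		return 0;
 *	}
 */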