/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions.  These should work on any machine
 * that has kernel and user data in the same address space, e.g. all
 * NOMMU machines.
 */
#include <linux/string.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static inline int __get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = get_unaligned((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

static inline int __put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		put_unaligned(*(u8 *)from, (u8 __force *)to);
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT 1

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

#ifdef CONFIG_SET_FS
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif
#endif /* CONFIG_SET_FS */

#define access_ok(addr, size)	__access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * doing a check against the get_fs() limit.
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
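
/*
 * Illustrative sketch (not part of this header): an architecture with a
 * flat address space could replace the always-true fallback above with a
 * real bounds check by defining __access_ok before this file is included.
 * The limit used below (EXAMPLE_MEM_END) is hypothetical and only shows
 * the shape of such an override:
 *
 *	#define __access_ok __access_ok
 *	static inline int __access_ok(unsigned long addr, unsigned long size)
 *	{
 *		return size <= EXAMPLE_MEM_END &&
 *		       addr <= EXAMPLE_MEM_END - size;
 *	}
 */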

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*(ptr))) ?			\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*(ptr))) ?			\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) : \
		((x) = (__typeof__(*(ptr)))0, -EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}
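
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * typical use of the single-value accessors above.  get_user() and
 * put_user() return 0 on success and -EFAULT when access_ok() rejects the
 * pointer or the copy fails, so callers normally just propagate the error:
 *
 *	static long example_increment(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val++;
 *		return put_user(val, uptr);
 *	}
 */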

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n)	(strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count.  Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(src, 1))
		return 0;
	return __strnlen_user(src, n);
}

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}

#include <asm/extable.h>

#endif /* __ASM_GENERIC_UACCESS_H */