/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT 1

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

#ifdef CONFIG_SET_FS
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif
#endif /* CONFIG_SET_FS */

#define access_ok(addr, size)	__access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?				\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?				\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0, -EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count.  Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(src, 1))
		return 0;
	return __strnlen_user(src, n);
}

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}

#include <asm/extable.h>

#endif /* __ASM_GENERIC_UACCESS_H */
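
/*
 * Illustrative usage sketch (comment only, not part of the interface above).
 * The struct and function names (demo_args, demo_ioctl) are hypothetical;
 * the uaccess calls themselves are the real helpers declared above and
 * normally reached through <linux/uaccess.h>.  Return conventions:
 * get_user()/put_user() return 0 or -EFAULT; copy_from_user() and
 * clear_user() return the number of bytes that could not be transferred
 * or zeroed (0 on success).
 *
 *	struct demo_args {
 *		u32 in;
 *		u32 out;
 *	};
 *
 *	static long demo_ioctl(void __user *argp)
 *	{
 *		struct demo_args args;
 *		u32 value;
 *
 *		// Bulk copy of the whole argument structure.
 *		if (copy_from_user(&args, argp, sizeof(args)))
 *			return -EFAULT;
 *
 *		// Single-value transfers; size is picked from the pointer type.
 *		if (get_user(value, &((struct demo_args __user *)argp)->in))
 *			return -EFAULT;
 *		if (put_user(value + 1, &((struct demo_args __user *)argp)->out))
 *			return -EFAULT;
 *
 *		// Zero the user buffer again.
 *		if (clear_user(argp, sizeof(args)))
 *			return -EFAULT;
 *
 *		return 0;
 *	}
 */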