/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  The return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to the architecture whether it wants to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
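
/*
 * A minimal caller-side sketch (the struct, uarg and the surrounding handler
 * are hypothetical, not part of this header): the plain copy_{to,from}_user()
 * variants perform the access_ok() check themselves, and copy_from_user()
 * zero-pads the destination on a short copy, so a caller typically only has
 * to turn a non-zero return value into -EFAULT:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */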

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the copy does not fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
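
/*
 * A sketch of the "inatomic" usage pattern (dst, uaddr, len and ret are
 * hypothetical): the range has already been validated with access_ok(),
 * and page faults are disabled so that a missing page makes the copy fail
 * instead of sleeping; the caller then falls back to a sleeping copy:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, uaddr, len);
 *	pagefault_enable();
 *	if (ret)
 *		... retry with copy_from_user() outside the atomic section ...
 */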

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}

#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers.  Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is effectively a NOP, so the handler won't actually
 * be disabled, and in_atomic() will report different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
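
/*
 * A sketch of the user_access_begin()/unsafe_put_user() pattern (uptr, val0
 * and val1 are hypothetical): a single user_access_begin() covers a batch of
 * unsafe stores, and both the success and the error path must call
 * user_access_end():
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(val0, &uptr[0], efault);
 *	unsafe_put_user(val1, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */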

#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif	/* __LINUX_UACCESS_H__ */