/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return check addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
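/*
 * Illustrative usage sketch (not part of this header; the names below are
 * hypothetical).  get_user()/put_user() return 0 on success or -EFAULT on
 * a fault, and the transferred value travels through the first argument,
 * sized by the pointer type:
 *
 *	u32 __user *uptr;
 *	u32 val;
 *
 *	if (get_user(val, uptr))	// checked variant: does access_ok()
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))	// same error convention for stores
 *		return -EFAULT;
 */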
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
#define __put_user_allowed(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size, do_allow)			\
({									\
	long __pu_err;							\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (!is_kernel_addr((unsigned long)__pu_addr))			\
		might_fault();						\
	__chk_user_ptr(ptr);						\
	if (do_allow)							\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	else								\
		__put_user_size_allowed((x), __pu_addr, (size), __pu_err); \
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(__pu_addr, size))					\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


extern long __get_user_bad(void);
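/*
 * Illustrative sketch (not part of this header; "ufoo", "a" and "b" are
 * hypothetical).  The "__" variants above skip the access_ok() check, so
 * they suit repeated stores to one user area that was validated once:
 *
 *	struct foo __user *ufoo;
 *
 *	if (!access_ok(ufoo, sizeof(*ufoo)))
 *		return -EFAULT;
 *	if (__put_user(a, &ufoo->a) || __put_user(b, &ufoo->b))
 *		return -EFAULT;
 */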
/*
 * This does an atomic 128 byte aligned load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(				\
		"1:	lvx  0,0,%1	# get user\n"	\
		"	stvx 0,0,%2	# put kernel\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)
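/*
 * Rough expansion sketch (not part of this header), assuming a 4-byte
 * access: __get_user_size(x, p, 4, err) opens user read access, issues
 * the "lwz" with an exception-table fixup that zeroes x and sets
 * err = -EFAULT on a fault, then closes user access again:
 *
 *	allow_read_from_user(p, 4);
 *	err = 0;
 *	__get_user_asm(x, p, err, "lwz");	// fixup: x = 0, err = -EFAULT
 *	prevent_read_from_user(p, 4);
 */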
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

#define __get_user_nocheck(x, ptr, size, do_allow)			\
({									\
	long __gu_err;							\
	__long_type(*(ptr)) __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	__chk_user_ptr(ptr);						\
	if (!is_kernel_addr((unsigned long)__gu_addr))			\
		might_fault();						\
	barrier_nospec();						\
	if (do_allow)							\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	else								\
		__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(__gu_addr, (size))) {				\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
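/*
 * Illustrative sketch (not part of this header; "ubuf" is hypothetical).
 * Callers normally go through the generic copy_to_user(), which performs
 * access_ok() and then calls raw_copy_to_user() below; for a
 * compile-time-constant n <= 8 the helper above degenerates to a single
 * __put_user_size_allowed() store instead of a __copy_tofrom_user() call:
 *
 *	u64 val = 0;
 *
 *	if (copy_to_user(ubuf, &val, sizeof(val)))
 *		return -EFAULT;
 */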
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

static __always_inline unsigned long __must_check
copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = memcpy_mcsafe((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) \
	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)

#endif	/* _ARCH_POWERPC_UACCESS_H */
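/*
 * Illustrative usage sketch (not part of this header; "uptr", "val" and the
 * "out" label are hypothetical).  The unsafe_* accessors above assume an
 * open user_access_begin()/user_access_end() window, which performs
 * access_ok() and opens/closes user access around the whole sequence:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, out);
 *	unsafe_put_user(val + 1, uptr, out);
 *	user_access_end();
 *	return 0;
 * out:
 *	user_access_end();
 *	return -EFAULT;
 */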