/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#endif

#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define __put_user(x, ptr)					\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x);	\
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr));	\
								\
	might_fault();						\
	do {							\
		__label__ __pu_failed;				\
								\
		allow_write_to_user(__pu_addr, __pu_size);	\
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed);	\
		prevent_write_to_user(__pu_addr, __pu_size);	\
		__pu_err = 0;					\
		break;						\
								\
__pu_failed:							\
		prevent_write_to_user(__pu_addr, __pu_size);	\
		__pu_err = -EFAULT;				\
	} while (0);						\
								\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __user *_pu_addr = (ptr);		\
								\
	access_ok(_pu_addr, sizeof(*(ptr))) ?			\
		  __put_user(x, _pu_addr) : -EFAULT;		\
})
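/*
 * Illustrative example (not part of the original header): a minimal sketch
 * of how the checking and non-checking variants are meant to be used.
 * put_user() performs the access_ok() check itself; __put_user() assumes
 * the caller has already validated the range, e.g. when making several
 * accesses to the same user buffer. The function and variable names below
 * are made up for the example.
 *
 *	static int example_store_flags(u32 __user *uptr, u32 flags)
 *	{
 *		// Single access: let put_user() do the access_ok() check.
 *		return put_user(flags, uptr);
 *	}
 *
 *	static int example_store_pair(u32 __user *uptr, u32 a, u32 b)
 *	{
 *		// Two accesses to the same area: check the range once,
 *		// then use the non-checking variant for both stores.
 *		if (!access_ok(uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__put_user(a, uptr))
 *			return -EFAULT;
 *		return __put_user(b, uptr + 1);
 *	}
 */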
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm_goto(x, addr, label, op)			\
	asm goto(						\
		"1:	" op " %0,0(%1)	# put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "b" (addr)				\
		:						\
		: label)
#else
#define __put_user_asm_goto(x, addr, label, op)			\
	asm goto(						\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m<>" (*addr)			\
		:						\
		: label)
#endif

#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label)			\
	asm goto ("1: std%U1%X1 %0,%1	# put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), DS_FORM_CONSTRAINT (*addr)		\
		:						\
		: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm goto(						\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)			\
do {									\
	__typeof__(*(ptr)) __user *__pus_addr = (ptr);			\
									\
	switch (size) {							\
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break;	\
	default: BUILD_BUG();						\
	}								\
} while (0)
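/*
 * Illustrative example (not part of the original header):
 * __put_user_size_goto() branches to the supplied label on a fault rather
 * than returning an error code, which is why __put_user() above wraps it
 * in a do/while block with a local __label__. A minimal sketch of that
 * pattern, with made-up names, assuming the caller has already opened the
 * user-write window (as __put_user() does via allow_write_to_user()):
 *
 *	static int example_unsafe_store(u32 __user *uptr, u32 val)
 *	{
 *		__label__ failed;
 *
 *		__put_user_size_goto(val, uptr, sizeof(*uptr), failed);
 *		return 0;
 *	failed:
 *		return -EFAULT;
 *	}
 */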
/*
 * This does an atomic 128-byte aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_goto_output(					\
		"1:	"op" %0,0(%1)	# get_user\n"		\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "b" (addr)					\
		:						\
		: label)
#else
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_goto_output(					\
		"1:	"op"%U1%X1 %0, %1	# get_user\n"	\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "m<>" (*addr)					\
		:						\
		: label)
#endif

#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label)			\
	__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label)			\
	asm_goto_output(					\
		"1:	lwz%X1 %0, %1\n"			\
		"2:	lwz%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		: "=&r" (x)					\
		: "m" (*addr)					\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label)				\
do {										\
	BUILD_BUG_ON(size > sizeof(x));						\
	switch (size) {								\
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break;	\
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break;	\
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break;	\
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break;	\
	default: x = 0; BUILD_BUG();						\
	}									\
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval)			\
do {									\
	__label__ __gus_failed;						\
									\
	__get_user_size_goto(x, ptr, size, __gus_failed);		\
	retval = 0;							\
	break;								\
__gus_failed:								\
	x = 0;								\
	retval = -EFAULT;						\
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	li %1,0\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err), "=r" (x)				\
		: "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz%X2 %1, %2\n"		\
		"2:	lwz%X2 %L1, %L2\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)				\
do {										\
	retval = 0;								\
	BUILD_BUG_ON(size > sizeof(x));						\
	switch (size) {								\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break;		\
	default: x = 0; BUILD_BUG();						\
	}									\
} while (0)

#define __get_user_size_goto(x, ptr, size, label)		\
do {								\
	long __gus_retval;					\
								\
	__get_user_size_allowed(x, ptr, size, __gus_retval);	\
	if (__gus_retval)					\
		goto label;					\
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
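/*
 * Illustrative example (not part of the original header): whichever branch
 * above is built, __get_user_size_goto() ends up with the same interface --
 * it loads into its first argument and branches to the label on a fault.
 * A minimal sketch of the read side of the local-label pattern, with
 * made-up names, assuming the user-read window is already open:
 *
 *	static int example_unsafe_load(u32 *val, const u32 __user *uptr)
 *	{
 *		__label__ failed;
 *
 *		__get_user_size_goto(*val, uptr, sizeof(*uptr), failed);
 *		return 0;
 *	failed:
 *		return -EFAULT;
 *	}
 */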
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

#define __get_user(x, ptr)					\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));	\
								\
	might_fault();						\
	allow_read_from_user(__gu_addr, __gu_size);		\
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	prevent_read_from_user(__gu_addr, __gu_size);		\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_gu_addr = (ptr);			\
									\
	access_ok(_gu_addr, sizeof(*(ptr))) ?				\
		  __get_user(x, _gu_addr) :				\
		  ((x) = (__force __typeof__(*(ptr)))0, -EFAULT);	\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_write_to_user(to, n);
	return ret;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	might_fault();
	allow_write_to_user(addr, size);
	ret = __arch_clear_user(addr, size);
	prevent_write_to_user(addr, size);
	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	return likely(access_ok(addr, size)) ?
					__clear_user(addr, size) : size;
}
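/*
 * Illustrative example (not part of the original header): get_user() and
 * put_user() return 0 or -EFAULT, whereas clear_user() and the raw copy
 * helpers follow the usual uaccess convention of returning the number of
 * bytes that could NOT be transferred (0 on success). A minimal sketch
 * with made-up names:
 *
 *	static int example_read_and_clear(u32 *out, void __user *ubuf,
 *					  unsigned long len)
 *	{
 *		u32 tmp;
 *
 *		if (get_user(tmp, (u32 __user *)ubuf))
 *			return -EFAULT;
 *		if (clear_user(ubuf, len))
 *			return -EFAULT;
 *		*out = tmp;
 *		return 0;
 *	}
 */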
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true)) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);

static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check __always_inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end	prevent_current_read_from_user

static __must_check __always_inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end	prevent_current_write_to_user

#define unsafe_get_user(x, p, e) do {					\
	__long_type(*(p)) __gu_val;					\
	__typeof__(*(p)) __user *__gu_addr = (p);			\
									\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e);	\
	(x) = (__typeof__(*(p)))__gu_val;				\
} while (0)

#define unsafe_put_user(x, p, e)					\
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
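/*
 * Illustrative example (not part of the original header): the unsafe_*
 * accessors above do no access_ok() check and do not open the user access
 * window themselves; they are meant to be used inside a
 * user_*_access_begin()/user_*_access_end() section and branch to an error
 * label on a fault. A minimal sketch of that pattern, with made-up names:
 *
 *	static int example_store_two(u32 __user *uptr, u32 a, u32 b)
 *	{
 *		if (!user_write_access_begin(uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		unsafe_put_user(a, uptr, efault);
 *		unsafe_put_user(b, uptr + 1, efault);
 *		user_write_access_end();
 *		return 0;
 *	efault:
 *		user_write_access_end();
 *		return -EFAULT;
 *	}
 */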
#define unsafe_copy_from_user(d, s, l, e)					\
do {										\
	u8 *_dst = (u8 *)(d);							\
	const u8 __user *_src = (const u8 __user *)(s);				\
	size_t _len = (l);							\
	int _i;									\
										\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
	if (_len & 4) {								\
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
		_i += 4;							\
	}									\
	if (_len & 2) {								\
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
		_i += 2;							\
	}									\
	if (_len & 1)								\
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
} while (0)

#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
	if (_len & 4) {							\
		unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2;						\
	}								\
	if (_len & 1)							\
		unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size_goto(*((type *)(dst)),				\
		(__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
		(__force type __user *)(dst), sizeof(type), err_label)

#endif	/* _ARCH_POWERPC_UACCESS_H */