/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs
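/*
 * Worked example for the __range_ok() check above (the numbers are purely
 * illustrative): with addr_limit == 0xffff, addr == 0xfff0 and size == 0x10
 * give addr + size == 0x10000 == addr_limit + 1, so the range is accepted,
 * whereas size == 0x11 overshoots the limit and is rejected. Evaluating the
 * sum to 65 bits also rejects a wrapping input such as addr == ~0UL with
 * size == 2, whose truncated 64-bit sum would otherwise look small.
 */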
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
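/*
 * Illustrative usage of the enable/disable pair above (a sketch that mirrors
 * the raw_copy_*_user() wrappers further down): an unprivileged user access
 * is bracketed so that PAN, or the TTBR0 switch, is only lifted for the
 * duration of the access, e.g.
 *
 *	uaccess_enable_not_uao();
 *	ret = __arch_copy_from_user(dst, __uaccess_mask_ptr(src), size);
 *	uaccess_disable_not_uao();
 */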
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_user(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
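/*
 * Illustrative get_user() call (the ioctl-style 'arg' below is hypothetical):
 * callers check the returned error, not the value, e.g.
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 *
 * get_user() expands to __get_user(), which performs the access_ok() check,
 * masks the pointer and only then issues the load via __raw_get_user().
 */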
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_enable_not_uao();					\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_enable_not_uao();					\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				      (from), (n));			\
	uaccess_disable_not_uao();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_enable_not_uao();					\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				    __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_enable_not_uao();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_disable_not_uao();
	}
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */