/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    test_thread_flag(TIF_TAGGED_ADDR))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
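/*
 * Worked example of the __range_ok() arithmetic above (illustrative
 * only): the comparison is carried out at 65-bit precision, so
 * addr == addr_limit with size == 1 gives addr + size == addr_limit + 1
 * and passes, while a sum that wraps past 2^64 always fails, even
 * though its truncated 64-bit value would compare as "small".
 * A hypothetical caller validating a user buffer before use:
 *
 *	static int example_check(const void __user *ubuf, size_t len)
 *	{
 *		if (!access_ok(ubuf, len))
 *			return -EFAULT;
 *		return 0;	(whole range lies below addr_limit)
 *	}
 */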
/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
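/*
 * Sketch of the intended pairing (illustrative; the real users are
 * the __raw_{get,put}_user() macros below and the assembly copy
 * routines): every window of user access is bracketed by an
 * enable/disable pair, so PAN (or the SW_TTBR0_PAN page-table swap)
 * only permits user loads/stores for the duration of the access:
 *
 *	uaccess_enable_not_uao();
 *	...unprivileged user loads/stores...
 *	uaccess_disable_not_uao();
 *
 * On CPUs with UAO both calls patch to NOPs, since the ldtr/sttr
 * instruction variants already perform the access with user
 * privileges.
 */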
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_user(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)
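/*
 * Illustrative use of the accessors defined here (hypothetical
 * helper, not part of this header): get_user() above and put_user()
 * below return 0 on success and -EFAULT on a faulting or
 * out-of-range access, so the result is usually propagated directly:
 *
 *	static long example_increment(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);
 *	}
 */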
#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
			    __uaccess_mask_ptr(from), (n));		\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */