/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define HAVE_GET_KERNEL_NOFAULT

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = TASK_SIZE_MAX - 1;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
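
/*
 * Worked example of the check above (illustrative only, assuming a
 * 48-bit TASK_SIZE_MAX of 1UL << 48 for the sake of the arithmetic):
 * with addr = (1UL << 48) - 8 and size = 8, the 65-bit sum is exactly
 * 2^48 == TASK_SIZE_MAX, so the range is accepted; with size = 16 the
 * sum is 2^48 + 8, which exceeds TASK_SIZE_MAX, so __range_ok() returns
 * 0 and access_ok() rejects the range.
 */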

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void uaccess_disable_privileged(void)
{
	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
 * user address. In case the pointer is tagged (has the top byte set), untag
 * the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
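
/*
 * Usage sketch (illustrative only): the accessors below pair the
 * access_ok() check with the mask, e.g.
 *
 *	if (access_ok(__p, sizeof(*__p))) {
 *		__p = uaccess_mask_ptr(__p);
 *		...access *__p...
 *	}
 *
 * so that a speculatively mispredicted bounds check cannot steer a
 * load or store at an arbitrary kernel address.
 */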

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_mem(ldr, x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __raw_get_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", x, ptr, err);				\
	uaccess_ttbr0_disable();					\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gkn_err = 0;						\
									\
	__raw_get_mem("ldr", *((type *)(dst)),				\
		      (__force type *)(src), __gkn_err);		\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)

#define __put_mem_asm(store, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" store "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_mem(str, x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

#define __raw_put_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_ttbr0_enable();						\
	__raw_put_mem("sttr", x, ptr, err);				\
	uaccess_ttbr0_disable();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
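
/*
 * Usage sketch (illustrative only, with a hypothetical 'u32 __user *uptr'):
 * a typical caller copies a scalar from user space, modifies it and writes
 * it back, propagating -EFAULT on failure, e.g.
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */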

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __pkn_err = 0;						\
									\
	__raw_put_mem("str", *((type *)(src)),				\
		      (__force type *)(dst), __pkn_err);		\
	if (unlikely(__pkn_err))					\
		goto err_label;						\
} while(0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				    (from), (n));			\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_ttbr0_enable();						\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				    __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_ttbr0_disable();
	}
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */