1 /* 2 * Based on arch/arm/include/asm/uaccess.h 3 * 4 * Copyright (C) 2012 ARM Ltd. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 #ifndef __ASM_UACCESS_H 19 #define __ASM_UACCESS_H 20 21 #include <asm/alternative.h> 22 #include <asm/kernel-pgtable.h> 23 #include <asm/sysreg.h> 24 25 /* 26 * User space memory access functions 27 */ 28 #include <linux/bitops.h> 29 #include <linux/kasan-checks.h> 30 #include <linux/string.h> 31 32 #include <asm/cpufeature.h> 33 #include <asm/ptrace.h> 34 #include <asm/memory.h> 35 #include <asm/compiler.h> 36 #include <asm/extable.h> 37 38 #define get_ds() (KERNEL_DS) 39 #define get_fs() (current_thread_info()->addr_limit) 40 41 static inline void set_fs(mm_segment_t fs) 42 { 43 current_thread_info()->addr_limit = fs; 44 45 /* 46 * Prevent a mispredicted conditional call to set_fs from forwarding 47 * the wrong address limit to access_ok under speculation. 48 */ 49 dsb(nsh); 50 isb(); 51 52 /* On user-mode return, check fs is correct */ 53 set_thread_flag(TIF_FSCHECK); 54 55 /* 56 * Enable/disable UAO so that copy_to_user() etc can access 57 * kernel memory with the unprivileged instructions. 58 */ 59 if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS) 60 asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO)); 61 else 62 asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO, 63 CONFIG_ARM64_UAO)); 64 } 65 66 #define segment_eq(a, b) ((a) == (b)) 67 68 /* 69 * Test whether a block of memory is a valid user space address. 70 * Returns 1 if the range is valid, 0 otherwise. 71 * 72 * This is equivalent to the following test: 73 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 74 */ 75 static inline unsigned long __range_ok(unsigned long addr, unsigned long size) 76 { 77 unsigned long limit = current_thread_info()->addr_limit; 78 79 __chk_user_ptr(addr); 80 asm volatile( 81 // A + B <= C + 1 for all A,B,C, in four easy steps: 82 // 1: X = A + B; X' = X % 2^64 83 " adds %0, %0, %2\n" 84 // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 85 " csel %1, xzr, %1, hi\n" 86 // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' 87 // to compensate for the carry flag being set in step 4. For 88 // X > 2^64, X' merely has to remain nonzero, which it does. 89 " csinv %0, %0, xzr, cc\n" 90 // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 91 // comes from the carry in being clear. Otherwise, we are 92 // testing X' - C == 0, subject to the previous adjustments. 93 " sbcs xzr, %0, %1\n" 94 " cset %0, ls\n" 95 : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); 96 97 return addr; 98 } 99 100 /* 101 * When dealing with data aborts, watchpoints, or instruction traps we may end 102 * up with a tagged userland pointer. Clear the tag to get a sane pointer to 103 * pass on to access_ok(), for instance. 104 */ 105 #define untagged_addr(addr) sign_extend64(addr, 55) 106 107 #define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) 108 #define user_addr_max get_fs 109 110 #define _ASM_EXTABLE(from, to) \ 111 " .pushsection __ex_table, \"a\"\n" \ 112 " .align 3\n" \ 113 " .long (" #from " - .), (" #to " - .)\n" \ 114 " .popsection\n" 115 116 /* 117 * User access enabling/disabling. 118 */ 119 #ifdef CONFIG_ARM64_SW_TTBR0_PAN 120 static inline void __uaccess_ttbr0_disable(void) 121 { 122 unsigned long flags, ttbr; 123 124 local_irq_save(flags); 125 ttbr = read_sysreg(ttbr1_el1); 126 ttbr &= ~TTBR_ASID_MASK; 127 /* reserved_ttbr0 placed before swapper_pg_dir */ 128 write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1); 129 isb(); 130 /* Set reserved ASID */ 131 write_sysreg(ttbr, ttbr1_el1); 132 isb(); 133 local_irq_restore(flags); 134 } 135 136 static inline void __uaccess_ttbr0_enable(void) 137 { 138 unsigned long flags, ttbr0, ttbr1; 139 140 /* 141 * Disable interrupts to avoid preemption between reading the 'ttbr0' 142 * variable and the MSR. A context switch could trigger an ASID 143 * roll-over and an update of 'ttbr0'. 144 */ 145 local_irq_save(flags); 146 ttbr0 = READ_ONCE(current_thread_info()->ttbr0); 147 148 /* Restore active ASID */ 149 ttbr1 = read_sysreg(ttbr1_el1); 150 ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ 151 ttbr1 |= ttbr0 & TTBR_ASID_MASK; 152 write_sysreg(ttbr1, ttbr1_el1); 153 isb(); 154 155 /* Restore user page table */ 156 write_sysreg(ttbr0, ttbr0_el1); 157 isb(); 158 local_irq_restore(flags); 159 } 160 161 static inline bool uaccess_ttbr0_disable(void) 162 { 163 if (!system_uses_ttbr0_pan()) 164 return false; 165 __uaccess_ttbr0_disable(); 166 return true; 167 } 168 169 static inline bool uaccess_ttbr0_enable(void) 170 { 171 if (!system_uses_ttbr0_pan()) 172 return false; 173 __uaccess_ttbr0_enable(); 174 return true; 175 } 176 #else 177 static inline bool uaccess_ttbr0_disable(void) 178 { 179 return false; 180 } 181 182 static inline bool uaccess_ttbr0_enable(void) 183 { 184 return false; 185 } 186 #endif 187 188 static inline void __uaccess_disable_hw_pan(void) 189 { 190 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, 191 CONFIG_ARM64_PAN)); 192 } 193 194 static inline void __uaccess_enable_hw_pan(void) 195 { 196 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, 197 CONFIG_ARM64_PAN)); 198 } 199 200 #define __uaccess_disable(alt) \ 201 do { \ 202 if (!uaccess_ttbr0_disable()) \ 203 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ 204 CONFIG_ARM64_PAN)); \ 205 } while (0) 206 207 #define __uaccess_enable(alt) \ 208 do { \ 209 if (!uaccess_ttbr0_enable()) \ 210 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ 211 CONFIG_ARM64_PAN)); \ 212 } while (0) 213 214 static inline void uaccess_disable(void) 215 { 216 __uaccess_disable(ARM64_HAS_PAN); 217 } 218 219 static inline void uaccess_enable(void) 220 { 221 __uaccess_enable(ARM64_HAS_PAN); 222 } 223 224 /* 225 * These functions are no-ops when UAO is present. 226 */ 227 static inline void uaccess_disable_not_uao(void) 228 { 229 __uaccess_disable(ARM64_ALT_PAN_NOT_UAO); 230 } 231 232 static inline void uaccess_enable_not_uao(void) 233 { 234 __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); 235 } 236 237 /* 238 * Sanitise a uaccess pointer such that it becomes NULL if above the 239 * current addr_limit. 240 */ 241 #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) 242 static inline void __user *__uaccess_mask_ptr(const void __user *ptr) 243 { 244 void __user *safe_ptr; 245 246 asm volatile( 247 " bics xzr, %1, %2\n" 248 " csel %0, %1, xzr, eq\n" 249 : "=&r" (safe_ptr) 250 : "r" (ptr), "r" (current_thread_info()->addr_limit) 251 : "cc"); 252 253 csdb(); 254 return safe_ptr; 255 } 256 257 /* 258 * The "__xxx" versions of the user access functions do not verify the address 259 * space - it must have been done previously with a separate "access_ok()" 260 * call. 261 * 262 * The "__xxx_error" versions set the third argument to -EFAULT if an error 263 * occurs, and leave it unchanged on success. 264 */ 265 #define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ 266 asm volatile( \ 267 "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ 268 alt_instr " " reg "1, [%2]\n", feature) \ 269 "2:\n" \ 270 " .section .fixup, \"ax\"\n" \ 271 " .align 2\n" \ 272 "3: mov %w0, %3\n" \ 273 " mov %1, #0\n" \ 274 " b 2b\n" \ 275 " .previous\n" \ 276 _ASM_EXTABLE(1b, 3b) \ 277 : "+r" (err), "=&r" (x) \ 278 : "r" (addr), "i" (-EFAULT)) 279 280 #define __get_user_err(x, ptr, err) \ 281 do { \ 282 unsigned long __gu_val; \ 283 __chk_user_ptr(ptr); \ 284 uaccess_enable_not_uao(); \ 285 switch (sizeof(*(ptr))) { \ 286 case 1: \ 287 __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ 288 (err), ARM64_HAS_UAO); \ 289 break; \ 290 case 2: \ 291 __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ 292 (err), ARM64_HAS_UAO); \ 293 break; \ 294 case 4: \ 295 __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ 296 (err), ARM64_HAS_UAO); \ 297 break; \ 298 case 8: \ 299 __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ 300 (err), ARM64_HAS_UAO); \ 301 break; \ 302 default: \ 303 BUILD_BUG(); \ 304 } \ 305 uaccess_disable_not_uao(); \ 306 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 307 } while (0) 308 309 #define __get_user_check(x, ptr, err) \ 310 ({ \ 311 __typeof__(*(ptr)) __user *__p = (ptr); \ 312 might_fault(); \ 313 if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 314 __p = uaccess_mask_ptr(__p); \ 315 __get_user_err((x), __p, (err)); \ 316 } else { \ 317 (x) = 0; (err) = -EFAULT; \ 318 } \ 319 }) 320 321 #define __get_user_error(x, ptr, err) \ 322 ({ \ 323 __get_user_check((x), (ptr), (err)); \ 324 (void)0; \ 325 }) 326 327 #define __get_user(x, ptr) \ 328 ({ \ 329 int __gu_err = 0; \ 330 __get_user_check((x), (ptr), __gu_err); \ 331 __gu_err; \ 332 }) 333 334 #define get_user __get_user 335 336 #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ 337 asm volatile( \ 338 "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ 339 alt_instr " " reg "1, [%2]\n", feature) \ 340 "2:\n" \ 341 " .section .fixup,\"ax\"\n" \ 342 " .align 2\n" \ 343 "3: mov %w0, %3\n" \ 344 " b 2b\n" \ 345 " .previous\n" \ 346 _ASM_EXTABLE(1b, 3b) \ 347 : "+r" (err) \ 348 : "r" (x), "r" (addr), "i" (-EFAULT)) 349 350 #define __put_user_err(x, ptr, err) \ 351 do { \ 352 __typeof__(*(ptr)) __pu_val = (x); \ 353 __chk_user_ptr(ptr); \ 354 uaccess_enable_not_uao(); \ 355 switch (sizeof(*(ptr))) { \ 356 case 1: \ 357 __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ 358 (err), ARM64_HAS_UAO); \ 359 break; \ 360 case 2: \ 361 __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ 362 (err), ARM64_HAS_UAO); \ 363 break; \ 364 case 4: \ 365 __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ 366 (err), ARM64_HAS_UAO); \ 367 break; \ 368 case 8: \ 369 __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ 370 (err), ARM64_HAS_UAO); \ 371 break; \ 372 default: \ 373 BUILD_BUG(); \ 374 } \ 375 uaccess_disable_not_uao(); \ 376 } while (0) 377 378 #define __put_user_check(x, ptr, err) \ 379 ({ \ 380 __typeof__(*(ptr)) __user *__p = (ptr); \ 381 might_fault(); \ 382 if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 383 __p = uaccess_mask_ptr(__p); \ 384 __put_user_err((x), __p, (err)); \ 385 } else { \ 386 (err) = -EFAULT; \ 387 } \ 388 }) 389 390 #define __put_user_error(x, ptr, err) \ 391 ({ \ 392 __put_user_check((x), (ptr), (err)); \ 393 (void)0; \ 394 }) 395 396 #define __put_user(x, ptr) \ 397 ({ \ 398 int __pu_err = 0; \ 399 __put_user_check((x), (ptr), __pu_err); \ 400 __pu_err; \ 401 }) 402 403 #define put_user __put_user 404 405 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); 406 #define raw_copy_from_user(to, from, n) \ 407 ({ \ 408 __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ 409 }) 410 411 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); 412 #define raw_copy_to_user(to, from, n) \ 413 ({ \ 414 __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ 415 }) 416 417 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); 418 #define raw_copy_in_user(to, from, n) \ 419 ({ \ 420 __arch_copy_in_user(__uaccess_mask_ptr(to), \ 421 __uaccess_mask_ptr(from), (n)); \ 422 }) 423 424 #define INLINE_COPY_TO_USER 425 #define INLINE_COPY_FROM_USER 426 427 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); 428 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) 429 { 430 if (access_ok(VERIFY_WRITE, to, n)) 431 n = __arch_clear_user(__uaccess_mask_ptr(to), n); 432 return n; 433 } 434 #define clear_user __clear_user 435 436 extern long strncpy_from_user(char *dest, const char __user *src, long count); 437 438 extern __must_check long strnlen_user(const char __user *str, long n); 439 440 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 441 struct page; 442 void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len); 443 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n); 444 445 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) 446 { 447 kasan_check_write(dst, size); 448 return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); 449 } 450 #endif 451 452 #endif /* __ASM_UACCESS_H */ 453