/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm
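
	/*
	 * Note: tramp_alias computes the trampoline fixmap alias of a symbol
	 * in .entry.tramp.text:
	 *   \dst = TRAMP_VALIAS + (\sym - .entry.tramp.text)
	 * \tmp is clobbered.
	 */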
	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm
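
	/*
	 * Note: kernel_entry saves the interrupted context into the pt_regs
	 * frame that kernel_ventry carved out (PT_REGS_SIZE has already been
	 * subtracted from the SP). For EL0 entries it also repurposes sp_el0
	 * to hold the current task pointer for the rest of the kernel.
	 */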
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load tsk
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * For exceptions from EL0, create a final frame record.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
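
	/*
	 * Note: kernel_exit undoes kernel_entry and returns from the
	 * exception. For returns to EL0 with KPTI enabled, the final steps
	 * run from the trampoline vectors (tramp_exit_native/tramp_exit_compat
	 * below) so that the kernel is unmapped before the ERET.
	 */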
	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm
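
	/*
	 * Note: the __swpan helpers below are called from kernel_entry and
	 * kernel_exit when the CPU lacks the ARMv8.1 PAN feature. They
	 * emulate PAN in software by disabling TTBR0_EL1 translations on
	 * entry and re-enabling them on exit.
	 */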
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)
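
	/*
	 * Note: each kernel_ventry slot is 128 bytes (.align 7, enforced by
	 * the .org check in kernel_ventry), giving the 16-entry, 2K-aligned
	 * table VBAR_EL1 expects: one group of four entries each for
	 * current-EL-with-SP0, current-EL-with-SPx, lower-EL AArch64 and
	 * lower-EL AArch32.
	 */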

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */

	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */


	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_data_page	dst
	adr_l	\dst, .entry.tramp.text
	sub	\dst, \dst, PAGE_SIZE
	.endm

	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RANDOMIZE_BASE
	tramp_data_page		\dst
	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
	ldr	\dst, [\dst]
#else
	ldr	\dst, =\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3
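
	/*
	 * Note: tramp_ventry generates one 128-byte trampoline vector slot.
	 * \kpti selects whether the kernel must be mapped in before reaching
	 * the real vectors, and \bhb selects the Spectre-BHB mitigation
	 * variant defined above: the loop workaround, a firmware call, or
	 * the clearbhb instruction sequence.
	 */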
	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch, i.e.
	 * the ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	ldr	x30, =vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm
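
	/*
	 * Note: tramp_exit is the return-to-EL0 path: it switches VBAR_EL1 to
	 * this CPU's chosen vectors (this_cpu_vector), swaps ttbr1 back to
	 * tramp_pg_dir via tramp_unmap_kernel, and ERETs with only the
	 * trampoline pages mapped.
	 */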
	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
	get_this_cpu_offset x29
	ldr	x30, [x30, x29]

	msr	vbar_el1, x30
	ldr	lr, [sp, #S_LR]
	tramp_unmap_kernel	x29
	.if	\regsize == 64
	mrs	x29, far_el1
	.endif
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
	sb
	.endm

	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
__entry_tramp_data_vectors:
	.quad	vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
	.quad	__sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
	.quad	this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection


/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0
	scs_load x1
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
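/*
 * Note: x19/x20 below come from the cpu_context restored by cpu_switch_to
 * above: for a kernel thread, x19 holds the thread's entry point and x20
 * its argument; for a user task, x19 is zero.
 */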
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	mov	x0, sp
	bl	asm_exit_to_user_mode
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
	stp	scs_sp, xzr, [sp, #-16]!
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif
	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17
	mov	x15, #IRQ_STACK_SIZE
	add	x16, x16, x15

	/* Move to the new stack and call the function there */
	mov	sp, x16
	blr	x1

	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
#ifdef CONFIG_SHADOW_CALL_STACK
	ldp	scs_sp, xzr, [sp], #16
#endif
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm
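
/*
 * Note: \exit_mode selects the firmware conduit (SMC or HVC), as read from
 * sdei_exit_mode by the caller. On success the COMPLETE call does not
 * return, so each conduit instruction is followed by a branch-to-self.
 */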

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4; __sdei_handler() will restore it from firmware's copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.ltorg
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
#endif

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/*
	 * x0 = (x0 <= SDEI_EV_FAILED) ?
	 *	EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME
	 */
	cmp	x0, #SDEI_EV_FAILED
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */