/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

        .macro clear_gp_regs
        .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
        mov x\n, xzr
        .endr
        .endm

        .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
        .align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        .if \el == 0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
        .if \regsize == 64
        mrs x30, tpidrro_el0
        msr tpidrro_el0, xzr
        .else
        mov x30, xzr
        .endif
alternative_else_nop_endif
        .endif
#endif

        sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
         * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
         * should always be zero.
         */
        add sp, sp, x0                          // sp' = sp + x0
        sub x0, sp, x0                          // x0' = sp' - x0 = (sp + x0) - x0 = sp
        tbnz x0, #THREAD_SHIFT, 0f
        sub x0, sp, x0                          // x0'' = sp' - x0' = (sp + x0) - sp = x0
        sub sp, sp, x0                          // sp'' = sp' - x0 = (sp + x0) - x0 = sp
        b el\el\ht\()_\regsize\()_\label

0:
        /*
         * Either we've just detected an overflow, or we've taken an exception
         * while on the overflow stack. Either way, we won't return to
         * userspace, and can clobber EL0 registers to free up GPRs.
         */

        /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
        msr tpidr_el0, x0

        /* Recover the original x0 value and stash it in tpidrro_el0 */
        sub x0, sp, x0
        msr tpidrro_el0, x0

        /* Switch to the overflow stack */
        adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

        /*
         * Check whether we were already on the overflow stack. This may happen
         * after panic() re-enables interrupts.
         */
        mrs x0, tpidr_el0                       // sp of interrupted context
        sub x0, sp, x0                          // delta with top of overflow stack
        tst x0, #~(OVERFLOW_STACK_SIZE - 1)     // within range?
        b.ne __bad_stack                        // no? -> bad stack pointer

        /* We were already on the overflow stack. Restore sp/x0 and carry on. */
        sub sp, sp, x0
        mrs x0, tpidrro_el0
#endif
        b el\el\ht\()_\regsize\()_\label
        .endm
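
/*
 * In C terms, the VMAP_STACK overflow test above is roughly the following
 * (an illustrative sketch only; the assembly has to do this without a
 * spare GPR, hence the add/sub shuffle through sp and x0):
 *
 *	unsigned long new_sp = old_sp - PT_REGS_SIZE;
 *	if (new_sp & (1UL << THREAD_SHIFT))	// no longer on a valid stack
 *		goto overflow_path;		// label 0 above
 *	else
 *		goto handler;
 *
 * The overflow path may stash state in tpidr_el0/tpidrro_el0 because it
 * never returns to userspace.
 */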

        .macro tramp_alias, dst, sym
        mov_q \dst, TRAMP_VALIAS
        add \dst, \dst, #(\sym - .entry.tramp.text)
        .endm

        /*
         * This macro corrupts x0-x3. It is the caller's duty to save/restore
         * them if required.
         */
        .macro apply_ssbd, state, tmp1, tmp2
alternative_cb spectre_v4_patch_fw_mitigation_enable
        b .L__asm_ssbd_skip\@           // Patched to NOP
alternative_cb_end
        ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
        cbz \tmp2, .L__asm_ssbd_skip\@
        ldr \tmp2, [tsk, #TSK_TI_FLAGS]
        tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
        mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
        mov w1, #\state
alternative_cb spectre_v4_patch_fw_mitigation_conduit
        nop                             // Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
        .endm

        /* Check for MTE asynchronous tag check faults */
        .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
        .arch_extension lse
alternative_if_not ARM64_MTE
        b 1f
alternative_else_nop_endif
        /*
         * Asynchronous tag check faults are only possible in ASYNC (2) or
         * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
         * set, so skip the check if it is unset.
         */
        tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
        mrs_s \tmp, SYS_TFSRE0_EL1
        tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
        /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
        mov \tmp, #_TIF_MTE_ASYNC_FAULT
        add \ti_flags, tsk, #TSK_TI_FLAGS
        stset \tmp, [\ti_flags]
1:
#endif
        .endm

        /* Clear the MTE asynchronous tag check faults */
        .macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
        /* See comment in check_mte_async_tcf above. */
        tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
        dsb ish
        msr_s SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
        .endm

        .macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
        ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
        orr \tmp, \tmp, #SYS_GCR_EL1_RRND
        msr_s SYS_GCR_EL1, \tmp
#endif
        .endm

        .macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb kasan_hw_tags_enable
        b 1f
alternative_cb_end
        mov \tmp, KERNEL_GCR_EL1
        msr_s SYS_GCR_EL1, \tmp
1:
#endif
        .endm

        .macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb kasan_hw_tags_enable
        b 1f
alternative_cb_end
        ldr \tmp, [\tsk, #THREAD_MTE_CTRL]

        mte_set_gcr \tmp, \tmp2
1:
#endif
        .endm
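
/*
 * mte_set_gcr above amounts to the following (C sketch, for illustration
 * only):
 *
 *	u64 gcr = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & 0xffff;
 *	write_sysreg_s(gcr | SYS_GCR_EL1_RRND, SYS_GCR_EL1);
 *
 * i.e. the task's 16-bit tag exclude mask is installed together with the
 * GCR_EL1.RRND bit.
 */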

        .macro kernel_entry, el, regsize = 64
        .if \regsize == 32
        mov w0, w0                      // zero upper 32 bits of x0
        .endif
        stp x0, x1, [sp, #16 * 0]
        stp x2, x3, [sp, #16 * 1]
        stp x4, x5, [sp, #16 * 2]
        stp x6, x7, [sp, #16 * 3]
        stp x8, x9, [sp, #16 * 4]
        stp x10, x11, [sp, #16 * 5]
        stp x12, x13, [sp, #16 * 6]
        stp x14, x15, [sp, #16 * 7]
        stp x16, x17, [sp, #16 * 8]
        stp x18, x19, [sp, #16 * 9]
        stp x20, x21, [sp, #16 * 10]
        stp x22, x23, [sp, #16 * 11]
        stp x24, x25, [sp, #16 * 12]
        stp x26, x27, [sp, #16 * 13]
        stp x28, x29, [sp, #16 * 14]

        .if \el == 0
        clear_gp_regs
        mrs x21, sp_el0
        ldr_this_cpu tsk, __entry_task, x20
        msr sp_el0, tsk

        /*
         * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
         * when scheduling.
         */
        ldr x19, [tsk, #TSK_TI_FLAGS]
        disable_step_tsk x19, x20

        /* Check for asynchronous tag check faults in user space */
        ldr x0, [tsk, THREAD_SCTLR_USER]
        check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
        /*
         * Enable IA for in-kernel PAC if the task had it disabled. Although
         * this could be implemented with an unconditional MRS which would avoid
         * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
         *
         * Install the kernel IA key only if IA was enabled in the task. If IA
         * was disabled on kernel exit then we would have left the kernel IA
         * installed so there is no need to install it again.
         */
        tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
        __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
        b 2f
1:
        mrs x0, sctlr_el1
        orr x0, x0, SCTLR_ELx_ENIA
        msr sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

        apply_ssbd 1, x22, x23

        mte_set_kernel_gcr x22, x23

        /*
         * Any non-self-synchronizing system register updates required for
         * kernel entry should be placed before this point.
         */
alternative_if ARM64_MTE
        isb
        b 1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
        isb
alternative_else_nop_endif
1:

        scs_load tsk
        .else
        add x21, sp, #PT_REGS_SIZE
        get_current_task tsk
        .endif /* \el == 0 */
        mrs x22, elr_el1
        mrs x23, spsr_el1
        stp lr, x21, [sp, #S_LR]

        /*
         * For exceptions from EL0, create a final frame record.
         * For exceptions from EL1, create a synthetic frame record so the
         * interrupted code shows up in the backtrace.
         */
        .if \el == 0
        stp xzr, xzr, [sp, #S_STACKFRAME]
        .else
        stp x29, x22, [sp, #S_STACKFRAME]
        .endif
        add x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
        bl __swpan_entry_el\el
alternative_else_nop_endif
#endif

        stp x22, x23, [sp, #S_PC]

        /* Not in a syscall by default (el0_svc overwrites for real syscall) */
        .if \el == 0
        mov w21, #NO_SYSCALL
        str w21, [sp, #S_SYSCALLNO]
        .endif

        /* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        mrs_s x20, SYS_ICC_PMR_EL1
        str x20, [sp, #S_PMR_SAVE]
        mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
        msr_s SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif

        /* Re-enable tag checking (TCO set on exception entry) */
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
        SET_PSTATE_TCO(0)
alternative_else_nop_endif
#endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x20 - ICC_PMR_EL1
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm
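
/*
 * The ARM64_HAS_ADDRESS_AUTH block in kernel_entry boils down to the
 * following (pseudocode sketch; the helper named here is illustrative):
 *
 *	if (task_sctlr_user & SCTLR_ELx_ENIA)
 *		install_kernel_ia_key();	// key swap only
 *	else
 *		sctlr_el1 |= SCTLR_ELx_ENIA;	// kernel key was left installed
 *
 * with a single ISB, shared with the MTE path, synchronising the
 * non-self-synchronizing system register writes above it.
 */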

        .macro kernel_exit, el
        .if \el != 0
        disable_daif
        .endif

        /* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        ldr x20, [sp, #S_PMR_SAVE]
        msr_s SYS_ICC_PMR_EL1, x20
        mrs_s x21, SYS_ICC_CTLR_EL1
        tbz x21, #6, .L__skip_pmr_sync\@        // Check for ICC_CTLR_EL1.PMHE
        dsb sy                                  // Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif

        ldp x21, x22, [sp, #S_PC]               // load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
        bl __swpan_exit_el\el
alternative_else_nop_endif
#endif

        .if \el == 0
        ldr x23, [sp, #S_SP]                    // load return stack pointer
        msr sp_el0, x23
        tst x22, #PSR_MODE32_BIT                // native task?
        b.eq 3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        mrs x29, contextidr_el1
        msr contextidr_el1, x29
#else
        msr contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
        scs_save tsk

        /* Ignore asynchronous tag check faults in the uaccess routines */
        ldr x0, [tsk, THREAD_SCTLR_USER]
        clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
        /*
         * IA was enabled for in-kernel PAC. Disable it now if needed, or
         * alternatively install the user's IA. All other per-task keys and
         * SCTLR bits were updated on task switch.
         *
         * No kernel C function calls after this.
         */
        tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
        __ptrauth_keys_install_user tsk, x0, x1, x2
        b 2f
1:
        mrs x0, sctlr_el1
        bic x0, x0, SCTLR_ELx_ENIA
        msr sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

        mte_set_user_gcr tsk, x0, x1

        apply_ssbd 0, x0, x1
        .endif

        msr elr_el1, x21                        // set up the return data
        msr spsr_el1, x22
        ldp x0, x1, [sp, #16 * 0]
        ldp x2, x3, [sp, #16 * 1]
        ldp x4, x5, [sp, #16 * 2]
        ldp x6, x7, [sp, #16 * 3]
        ldp x8, x9, [sp, #16 * 4]
        ldp x10, x11, [sp, #16 * 5]
        ldp x12, x13, [sp, #16 * 6]
        ldp x14, x15, [sp, #16 * 7]
        ldp x16, x17, [sp, #16 * 8]
        ldp x18, x19, [sp, #16 * 9]
        ldp x20, x21, [sp, #16 * 10]
        ldp x22, x23, [sp, #16 * 11]
        ldp x24, x25, [sp, #16 * 12]
        ldp x26, x27, [sp, #16 * 13]
        ldp x28, x29, [sp, #16 * 14]
        ldr lr, [sp, #S_LR]
        add sp, sp, #PT_REGS_SIZE               // restore sp

        .if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        bne 4f
        msr far_el1, x30
        tramp_alias x30, tramp_exit_native
        br x30
4:
        tramp_alias x30, tramp_exit_compat
        br x30
#endif
        .else
        /* Ensure any device/NC reads complete */
        alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

        eret
        .endif
        sb
        .endm
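
/*
 * Exit-path selection when KPTI is in use (sketch): the ERET above is
 * patched to a NOP and we fall through to the trampoline, reusing the
 * condition flags set by the "tst x22, #PSR_MODE32_BIT" check earlier in
 * kernel_exit:
 *
 *	if (!(spsr & PSR_MODE32_BIT)) {		// native task
 *		far_el1 = x30;			// scratch; tramp_exit reloads it
 *		goto tramp_exit_native;
 *	}
 *	goto tramp_exit_compat;
 */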

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        /*
         * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
         * EL0, there is no need to check the state of TTBR0_EL1 since
         * accesses are always enabled.
         * Note that the meaning of this bit differs from the ARMv8.1 PAN
         * feature as all TTBR0_EL1 accesses are disabled, not just those to
         * user mappings.
         */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
        mrs x21, ttbr0_el1
        tst x21, #TTBR_ASID_MASK        // Check for the reserved ASID
        orr x23, x23, #PSR_PAN_BIT      // Set the emulated PAN in the saved SPSR
        b.eq 1f                         // TTBR0 access already disabled
        and x23, x23, #~PSR_PAN_BIT     // Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
        __uaccess_ttbr0_disable x21
1:      ret
SYM_CODE_END(__swpan_entry_el1)

        /*
         * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
         * PAN bit checking.
         */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
        tbnz x22, #22, 1f               // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
        __uaccess_ttbr0_enable x0, x1
1:      and x22, x22, #~PSR_PAN_BIT     // ARMv8.0 CPUs do not understand this bit
        ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
        __uaccess_ttbr0_enable x0, x1
        /*
         * Enable errata workarounds only if returning to user. The only
         * workaround currently required for TTBR0_EL1 changes is for
         * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
         * corruption).
         */
        b post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk     .req x28                        // current thread_info

        .text

/*
 * Exception vectors.
 */
        .pushsection ".entry.text", "ax"

        .align 11
SYM_CODE_START(vectors)
        kernel_ventry 1, t, 64, sync            // Synchronous EL1t
        kernel_ventry 1, t, 64, irq             // IRQ EL1t
        kernel_ventry 1, t, 64, fiq             // FIQ EL1t
        kernel_ventry 1, t, 64, error           // Error EL1t

        kernel_ventry 1, h, 64, sync            // Synchronous EL1h
        kernel_ventry 1, h, 64, irq             // IRQ EL1h
        kernel_ventry 1, h, 64, fiq             // FIQ EL1h
        kernel_ventry 1, h, 64, error           // Error EL1h

        kernel_ventry 0, t, 64, sync            // Synchronous 64-bit EL0
        kernel_ventry 0, t, 64, irq             // IRQ 64-bit EL0
        kernel_ventry 0, t, 64, fiq             // FIQ 64-bit EL0
        kernel_ventry 0, t, 64, error           // Error 64-bit EL0

        kernel_ventry 0, t, 32, sync            // Synchronous 32-bit EL0
        kernel_ventry 0, t, 32, irq             // IRQ 32-bit EL0
        kernel_ventry 0, t, 32, fiq             // FIQ 32-bit EL0
        kernel_ventry 0, t, 32, error           // Error 32-bit EL0
SYM_CODE_END(vectors)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
        /*
         * We detected an overflow in kernel_ventry, which switched to the
         * overflow stack. Stash the exception regs, and head to our overflow
         * handler.
         */

        /* Restore the original x0 value */
        mrs x0, tpidrro_el0

        /*
         * Store the original GPRs to the new stack. The original SP (minus
         * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
         */
        sub sp, sp, #PT_REGS_SIZE
        kernel_entry 1
        mrs x0, tpidr_el0
        add x0, x0, #PT_REGS_SIZE
        str x0, [sp, #S_SP]

        /* Stash the regs for handle_bad_stack */
        mov x0, sp

        /* Time to die */
        bl handle_bad_stack
        ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */


        .macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
        kernel_entry \el, \regsize
        mov x0, sp
        bl el\el\ht\()_\regsize\()_\label\()_handler
        .if \el == 0
        b ret_to_user
        .else
        b ret_to_kernel
        .endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
        .endm
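
/*
 * As an example, "entry_handler 0, t, 64, sync" below expands to (roughly):
 *
 * SYM_CODE_START_LOCAL(el0t_64_sync)
 *	kernel_entry 0, 64
 *	mov x0, sp
 *	bl el0t_64_sync_handler
 *	b ret_to_user
 * SYM_CODE_END(el0t_64_sync)
 */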

/*
 * Early exception handlers
 */
        entry_handler 1, t, 64, sync
        entry_handler 1, t, 64, irq
        entry_handler 1, t, 64, fiq
        entry_handler 1, t, 64, error

        entry_handler 1, h, 64, sync
        entry_handler 1, h, 64, irq
        entry_handler 1, h, 64, fiq
        entry_handler 1, h, 64, error

        entry_handler 0, t, 64, sync
        entry_handler 0, t, 64, irq
        entry_handler 0, t, 64, fiq
        entry_handler 0, t, 64, error

        entry_handler 0, t, 32, sync
        entry_handler 0, t, 32, irq
        entry_handler 0, t, 32, fiq
        entry_handler 0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
        kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
        ldr x19, [tsk, #TSK_TI_FLAGS]   // re-check for single-step
        enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
        bl stackleak_erase
#endif
        kernel_exit 0
SYM_CODE_END(ret_to_user)

        .popsection                     // .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
        .pushsection ".entry.tramp.text", "ax"

        // Move from tramp_pg_dir to swapper_pg_dir
        .macro tramp_map_kernel, tmp
        mrs \tmp, ttbr1_el1
        add \tmp, \tmp, #TRAMP_SWAPPER_OFFSET
        bic \tmp, \tmp, #USER_ASID_FLAG
        msr ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
        /* ASID already in \tmp[63:48] */
        movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
        movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
        /* 2MB boundary containing the vectors, so we nobble the walk cache */
        movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
        isb
        tlbi vae1, \tmp
        dsb nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
        .endm

        // Move from swapper_pg_dir to tramp_pg_dir
        .macro tramp_unmap_kernel, tmp
        mrs \tmp, ttbr1_el1
        sub \tmp, \tmp, #TRAMP_SWAPPER_OFFSET
        orr \tmp, \tmp, #USER_ASID_FLAG
        msr ttbr1_el1, \tmp
        /*
         * We avoid running the post_ttbr_update_workaround here because
         * it's only needed by Cavium ThunderX, which requires KPTI to be
         * disabled.
         */
        .endm

        .macro tramp_ventry, regsize = 64
        .align 7
1:
        .if \regsize == 64
        msr tpidrro_el0, x30            // Restored in kernel_ventry
        .endif
        /*
         * Defend against branch aliasing attacks by pushing a dummy
         * entry onto the return stack and using a RET instruction to
         * enter the full-fat kernel vectors.
         */
        bl 2f
        b .
2:
        tramp_map_kernel x30
#ifdef CONFIG_RANDOMIZE_BASE
        adr x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
        ldr x30, [x30]
#else
        ldr x30, =vectors
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
        prfm plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
        msr vbar_el1, x30
        add x30, x30, #(1b - tramp_vectors)
        isb
        ret
        .endm
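
/*
 * Each tramp_ventry slot thus does, in outline (sketch):
 *
 *	x30 = &vectors;			// from a literal, or from the data
 *					// page at tramp_vectors + PAGE_SIZE
 *					// when CONFIG_RANDOMIZE_BASE=y
 *	vbar_el1 = x30;
 *	x30 += this_slot - tramp_vectors;
 *	ret;				// to the matching slot in vectors
 *
 * The RET's return-stack prediction can only resolve to the harmless
 * "b ." pushed by the preceding BL, which is the point of the
 * branch-aliasing defence described above.
 */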

        .macro tramp_exit, regsize = 64
        adr x30, tramp_vectors
        msr vbar_el1, x30
        tramp_unmap_kernel x30
        .if \regsize == 64
        mrs x30, far_el1
        .endif
        eret
        sb
        .endm

        .align 11
SYM_CODE_START_NOALIGN(tramp_vectors)
        .space 0x400

        tramp_ventry
        tramp_ventry
        tramp_ventry
        tramp_ventry

        tramp_ventry 32
        tramp_ventry 32
        tramp_ventry 32
        tramp_ventry 32
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
        tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
        tramp_exit 32
SYM_CODE_END(tramp_exit_compat)

        .ltorg
        .popsection                     // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
        .pushsection ".rodata", "a"
        .align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
        .quad vectors
SYM_DATA_END(__entry_tramp_data_start)
        .popsection                     // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
SYM_FUNC_START(cpu_switch_to)
        mov x10, #THREAD_CPU_CONTEXT
        add x8, x0, x10
        mov x9, sp
        stp x19, x20, [x8], #16         // store callee-saved registers
        stp x21, x22, [x8], #16
        stp x23, x24, [x8], #16
        stp x25, x26, [x8], #16
        stp x27, x28, [x8], #16
        stp x29, x9, [x8], #16
        str lr, [x8]
        add x8, x1, x10
        ldp x19, x20, [x8], #16         // restore callee-saved registers
        ldp x21, x22, [x8], #16
        ldp x23, x24, [x8], #16
        ldp x25, x26, [x8], #16
        ldp x27, x28, [x8], #16
        ldp x29, x9, [x8], #16
        ldr lr, [x8]
        mov sp, x9
        msr sp_el0, x1
        ptrauth_keys_install_kernel x1, x8, x9, x10
        scs_save x0
        scs_load x1
        ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
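
/*
 * THREAD_CPU_CONTEXT is the asm-offset of task->thread.cpu_context, whose
 * layout mirrors the store order above (sketch):
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23,
 *			      x24, x25, x26, x27, x28;
 *		unsigned long fp;	// x29
 *		unsigned long sp;
 *		unsigned long pc;	// the saved lr, i.e. where to resume
 *	};
 *
 * sp_el0 is repointed at the incoming task so that "current" is correct
 * as soon as we return into it.
 */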

/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
        bl schedule_tail
        cbz x19, 1f                     // not a kernel thread
        mov x0, x20
        blr x19
1:      get_current_task tsk
        mov x0, sp
        bl asm_exit_to_user_mode
        b ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *                        void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
        stp scs_sp, xzr, [sp, #-16]!
        ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif
        /* Create a frame record to save our LR and SP (implicit in FP) */
        stp x29, x30, [sp, #-16]!
        mov x29, sp

        ldr_this_cpu x16, irq_stack_ptr, x17
        mov x15, #IRQ_STACK_SIZE
        add x16, x16, x15

        /* Move to the new stack and call the function there */
        mov sp, x16
        blr x1

        /*
         * Restore the SP from the FP, and restore the FP and LR from the frame
         * record.
         */
        mov sp, x29
        ldp x29, x30, [sp], #16
#ifdef CONFIG_SHADOW_CALL_STACK
        ldp scs_sp, xzr, [sp], #16
#endif
        ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)
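
/*
 * In C terms call_on_irq_stack() behaves roughly like (illustrative
 * sketch; set_sp() and this_cpu_irq_stack_top() stand in for the raw SP
 * switch and the per-CPU irq_stack_ptr + IRQ_STACK_SIZE computation):
 *
 *	void call_on_irq_stack(struct pt_regs *regs,
 *			       void (*func)(struct pt_regs *))
 *	{
 *		unsigned long old_sp = current_stack_pointer;
 *
 *		set_sp(this_cpu_irq_stack_top());
 *		func(regs);
 *		set_sp(old_sp);
 *	}
 *
 * with the old SP held in the frame record pushed on the task stack, so
 * the unwinder can walk from the IRQ stack back to the interrupted
 * context.
 */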

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
        /* On success, this call never returns... */
        cmp \exit_mode, #SDEI_EXIT_SMC
        b.ne 99f
        smc #0
        b .
99:     hvc #0
        b .
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
        mrs x4, ttbr1_el1
        tbz x4, #USER_ASID_BIT, 1f

        tramp_map_kernel tmp=x4
        isb
        mov x4, xzr

        /*
         * Remember whether to unmap the kernel on exit.
         */
1:      str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]

#ifdef CONFIG_RANDOMIZE_BASE
        adr x4, tramp_vectors + PAGE_SIZE
        add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
        ldr x4, [x4]
#else
        ldr x4, =__sdei_asm_handler
#endif
        br x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
        ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
        cbnz x4, 1f

        tramp_unmap_kernel tmp=x4

1:      sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
        .ltorg
.popsection                             // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
SYM_DATA_START(__sdei_asm_trampoline_next_handler)
        .quad __sdei_asm_handler
SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection                             // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
        stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
        stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
        stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
        stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
        stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
        stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
        stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
        stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
        stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
        stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
        stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
        stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
        stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
        stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
        mov x4, sp
        stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

        mov x19, x1

#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
        ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
#endif

#ifdef CONFIG_VMAP_STACK
        /*
         * entry.S may have been using sp as a scratch register, find whether
         * this is a normal or critical event and switch to the appropriate
         * stack for this CPU.
         */
        cbnz w4, 1f
        ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
        b 2f
1:      ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:      mov x6, #SDEI_STACK_SIZE
        add x5, x5, x6
        mov sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
        /* Use a separate shadow call stack for normal and critical events */
        cbnz w4, 3f
        ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
        b 4f
3:      ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

        /*
         * We may have interrupted userspace, or a guest, or exit-from or
         * return-to either of these. We can't trust sp_el0, restore it.
         */
        mrs x28, sp_el0
        ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
        msr sp_el0, x0

        /* If we interrupted the kernel point to the previous stack/frame. */
        and x0, x3, #0xc
        mrs x1, CurrentEL
        cmp x0, x1
        csel x29, x29, xzr, eq          // fp, or zero
        csel x4, x2, xzr, eq            // elr, or zero

        stp x29, x4, [sp, #-16]!
        mov x29, sp

        add x0, x19, #SDEI_EVENT_INTREGS
        mov x1, x19
        bl __sdei_handler

        msr sp_el0, x28
        /* restore regs >x17 that we clobbered */
        mov x4, x19                     // keep x4 for __sdei_asm_exit_trampoline
        ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
        ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
        ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
        mov sp, x1

        mov x1, x0                      // address to complete_and_resume
        /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
        cmp x0, #1
        mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
        mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
        csel x0, x2, x3, ls

        ldr_l x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
        sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
        br x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */