xref: /openbmc/linux/arch/riscv/kernel/entry.S (revision 7b7fd0ac7dc1ffcaf24d9bca0f051b0168e43cd4)
150acfb2bSThomas Gleixner/* SPDX-License-Identifier: GPL-2.0-only */
27db91e57SPalmer Dabbelt/*
37db91e57SPalmer Dabbelt * Copyright (C) 2012 Regents of the University of California
47db91e57SPalmer Dabbelt * Copyright (C) 2017 SiFive
57db91e57SPalmer Dabbelt */
67db91e57SPalmer Dabbelt
77db91e57SPalmer Dabbelt#include <linux/init.h>
87db91e57SPalmer Dabbelt#include <linux/linkage.h>
97db91e57SPalmer Dabbelt
107db91e57SPalmer Dabbelt#include <asm/asm.h>
117db91e57SPalmer Dabbelt#include <asm/csr.h>
127db91e57SPalmer Dabbelt#include <asm/unistd.h>
13eff53aeaSDeepak Gupta#include <asm/page.h>
147db91e57SPalmer Dabbelt#include <asm/thread_info.h>
157db91e57SPalmer Dabbelt#include <asm/asm-offsets.h>
16800149a7SVincent Chen#include <asm/errata_list.h>
17eff53aeaSDeepak Gupta#include <linux/sizes.h>
187db91e57SPalmer Dabbelt
19a08b414dSNam Cao	.section .irqentry.text, "ax"
20a08b414dSNam Cao
SYM_CODE_START(handle_exception)
	/*
	 * CSR_SCRATCH holds the kernel thread pointer while running in
	 * userspace and zero while running in the kernel.  Swapping it with
	 * tp therefore both preserves the user tp and tells us where the
	 * trap came from: a non-zero result means userspace.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	/* Kernel trap: recover tp from scratch and publish the current SP. */
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	/*
	 * Detect a kernel stack overflow: pre-decrement by the frame size,
	 * then test the THREAD_SHIFT-aligned parity bit of the resulting
	 * address.  A set bit means sp has run off its stack into the
	 * guard area, so divert to the overflow handler.
	 */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	/* Build a struct pt_regs frame on the kernel stack. */
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Clear SUM so user memory stays inaccessible outside the dedicated
	 * uaccess routines, and clear FS/VS so any illegal floating-point
	 * or vector use inside the kernel traps immediately.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Zero the scratch register so that a nested exception is
	 * recognised as having come from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Reload gp; relaxation must be off or this collapses to a no-op. */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
	move a0, sp /* pt_regs */
	la ra, ret_from_exception

	/*
	 * The sign bit of the cause register distinguishes interrupts
	 * (set) from synchronous exceptions (clear).
	 */
	bge s4, zero, 1f

	/* Asynchronous interrupt: hand off to the IRQ entry code. */
	tail do_irq
1:
	/* Synchronous exception: dispatch through the vector table. */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Unknown cause codes fall through to the generic handler. */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
1097db91e57SPalmer Dabbelt
/*
 * ret_from_exception must be entered with interrupts disabled.
 * Current callers:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	/* Non-zero previous-privilege bit => returning to the kernel. */
	bnez s0, 1f

	/* Returning to userspace: remember the unwound kernel SP. */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Stash tp in the scratch register so the next trap from
	 * userspace can locate the kernel data structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
	REG_L a0, PT_STATUS(sp)
	/*
	 * A load reservation is per-hart processor state that cannot be
	 * saved or restored across a context switch, so we kill any live
	 * reservation here instead; implementations may legally drop
	 * reservations at any point.
	 *
	 * A dangling reservation can come from trapping in the middle of
	 * an LR/SC sequence, or from a taken forward branch around an SC
	 * (the CAS implementation).  Reservations may be arbitrarily
	 * large, so one must be cleared between the last CAS and the jump
	 * into the new context.  The SC below targets the already-loaded
	 * PT_EPC slot, so whether it succeeds or fails the memory value
	 * is unchanged -- its only effect is clearing the reservation.
	 */
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	/* Restore every GPR from the pt_regs frame, sp last. */
	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)
1767db91e57SPalmer Dabbelt
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/*
	 * We arrive here from kernel context, so CSR_SCRATCH must be zero;
	 * borrow it to free x31 for computing the per-CPU overflow stack.
	 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* Zero x31 (restoring the scratch-is-0 invariant) and get x31 back. */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* Capture the faulting context onto the overflow stack. */
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
21331da94c2STong Tiangen
SYM_CODE_START(ret_from_fork)
	call schedule_tail
	/* s0 non-zero means this is a kernel thread: run fn(arg) first. */
	beqz s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	call syscall_exit_to_user_mode
	j ret_from_exception
SYM_CODE_END(ret_from_fork)
2257db91e57SPalmer Dabbelt
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * Both a0 and a1 are left untouched here because they are forwarded
 * verbatim as the arguments to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* a3 = &prev->thread, a4 = &next->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	/* Spill prev's callee-saved state into prev->thread. */
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Reload next's callee-saved state from next->thread. */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
SYM_FUNC_END(__switch_to)
2747db91e57SPalmer Dabbelt
#ifndef CONFIG_MMU
/* Without an MMU there is no page-fault handler; treat them as unknown. */
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table, indexed by the scause exception code */
SYM_CODE_START(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
SYM_CODE_END(excp_vect_table)
3026bd33e1eSChristoph Hellwig
#ifndef CONFIG_MMU
/*
 * Signal-return trampoline mapped into userspace on no-MMU systems:
 * it simply issues the rt_sigreturn system call.
 */
SYM_CODE_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_CODE_END(__user_rt_sigreturn)
#endif
309