xref: /openbmc/linux/arch/riscv/kernel/entry.S (revision 26cfd12b)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif
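/*
 * With preemption disabled, resume_kernel (above) is simply an alias for
 * restore_all: a trap taken from kernel mode returns straight through the
 * context restore below.
 */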

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
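	/*
	 * Save all general-purpose registers into the pt_regs frame just
	 * allocated on the kernel stack.  sp (x2) and tp (x4) are handled
	 * separately below, once their pre-trap values are in hand.
	 */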
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS
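	/*
	 * Stash the trap CSRs and the pre-trap sp/tp in pt_regs.  At this
	 * point CSR_SCRATCH still holds the interrupted context's tp, left
	 * there by the csrrw/csrr at the top of handle_exception.
	 */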

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
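	/*
	 * handle_arch_irq is a function pointer installed by the interrupt
	 * controller driver via set_handle_irq().  Jump through it; ra was
	 * set above so it returns via ret_from_exception.
	 */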
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	csrs CSR_STATUS, SR_IE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
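	/*
	 * Dispatch through excp_vect_table: scale the exception code by the
	 * pointer size and bounds-check against the end of the table.
	 */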
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* Save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
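	/* An ecall/scall is always 4 bytes; there is no compressed form. */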
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The syscall number is held in a7.  If it is above the highest
	 * allowed value, redirect to sys_ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if the syscall was rejected by the tracer, i.e. a7 == -1.
	 * If so, pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * When we jump directly to ret_from_syscall_rejected below, the
	 * actual syscall was never executed: seccomp already set the return
	 * value in the current task's pt_regs (when configured with
	 * SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
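	/* Disable interrupts while we decide where this trap returns to. */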
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts.  We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS.  As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context.  While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L  a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
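	/*
	 * The SC above targets the word just loaded, so it either fails or
	 * rewrites the same value; either way the reservation is now cleared
	 * and the success flag is discarded via x0.
	 */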

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
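	/* Preempt only if preempt_count is zero and TIF_NEED_RESCHED is set. */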
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
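	/*
	 * Reload the syscall arguments from pt_regs: the tracer may have
	 * modified them.  A nonzero return from do_syscall_trace_enter means
	 * the syscall is to be skipped (e.g. rejected by seccomp).
	 */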
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
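	/* s0 = fn and s1 = arg were set up by copy_thread(). */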
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
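	/*
	 * a3/a4 now point at prev->thread.ra and next->thread.ra; the
	 * TASK_THREAD_*_RA offsets below are relative to that base.
	 */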
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
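/* Without an MMU there are no page faults; route them to the unknown-trap handler. */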
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
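	/* "scall" is the legacy assembler mnemonic for "ecall". */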
	scall
END(__user_rt_sigreturn)
#endif