/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text
	.altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, sscratch
	 * will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SSCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SSCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_SSTATUS, t0
	csrr s2, CSR_SEPC
	csrr s3, CSR_STVAL
	csrr s4, CSR_SCAUSE
	csrr s5, CSR_SSCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_SSTATUS(sp)
	REG_S s2, PT_SEPC(sp)
	REG_S s3, PT_SBADADDR(sp)
	REG_S s4, PT_SCAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm
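
/*
 * Roughly, the entry protocol above corresponds to this C-style sketch
 * (illustrative pseudocode only, not code from this file; user_sp and
 * kernel_sp stand in for the TASK_TI_USER_SP/TASK_TI_KERNEL_SP fields):
 *
 *	swap(tp, sscratch);			csrrw tp, CSR_SSCRATCH, tp
 *	if (tp == 0) {				trapped while in the kernel:
 *		tp = sscratch;			sscratch still held the kernel tp
 *		kernel_sp(tp) = sp;		keep running on the current stack
 *	}
 *	user_sp(tp) = sp;
 *	sp = kernel_sp(tp) - PT_SIZE_ON_STACK;
 *
 * The invariant is that sscratch holds the kernel tp while executing in
 * user mode and zero while executing in the kernel; handle_exception and
 * resume_userspace below maintain it with their csrw CSR_SSCRATCH writes.
 */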

/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_SSTATUS(sp)
	REG_L a2, PT_SEPC(sp)
	csrw CSR_SSTATUS, a0
	csrw CSR_SEPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)
	.endm

#if !IS_ENABLED(CONFIG_PREEMPT)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set the sscratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SSCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/* Exceptions run with interrupts enabled */
	csrs sstatus, SR_SIE

	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * ecall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_SEPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/* Syscall number held in a7 */
	bgeu a7, t0, 1f
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0
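
/*
 * In rough C terms the dispatch above is (illustrative pseudocode, not
 * code from this file):
 *
 *	fn = (a7 < __NR_syscalls) ? sys_call_table[a7] : sys_ni_syscall;
 *	a0 = fn(a0, a1, a2, a3, a4, a5);
 *
 * jalr sets ra to the next instruction, so the handler returns straight
 * into ret_from_syscall below.
 */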

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_SSTATUS(sp)
	csrc sstatus, SR_SIE
	andi s0, s0, SR_SPP
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into sscratch, so we can find the kernel data structures
	 * again.
	 */
	csrw CSR_SSCRATCH, tp

restore_all:
	RESTORE_ALL
	sret

#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
need_resched:
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j need_resched
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
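
/*
 * ret_from_kernel_thread above relies on copy_thread() having seeded the
 * new task's saved callee context with ra = ret_from_kernel_thread,
 * s0 = the thread function and s1 = its argument, so the register restore
 * in __switch_to below lands here with fn and arg already in place.
 */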

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
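
/*
 * The table is consumed by the "Handle other exceptions" block in
 * handle_exception above; in rough C terms (illustrative pseudocode):
 *
 *	if (scause < ARRAY_SIZE(excp_vect_table))
 *		excp_vect_table[scause](regs);
 *	else
 *		do_trap_unknown(regs);
 *
 * Syscalls (EXC_SYSCALL) never reach the table: they are routed to
 * handle_syscall first, which is why the do_trap_ecall_u slot is marked
 * as intercepted.
 */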