/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text
	.altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context

	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer.  If we came from the kernel, sscratch
	 * will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SSCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SSCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_SSTATUS, t0
	csrr s2, CSR_SEPC
	csrr s3, CSR_STVAL
	csrr s4, CSR_SCAUSE
	csrr s5, CSR_SSCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_SSTATUS(sp)
	REG_S s2, PT_SEPC(sp)
	REG_S s3, PT_SBADADDR(sp)
	REG_S s4, PT_SCAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm
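
/*
 * After SAVE_ALL: sp points at the pt_regs frame on the kernel stack, tp
 * points at the current task_struct, and the saved trap state is also live
 * in s1 (sstatus as it was before SUM/FS were cleared), s2 (sepc),
 * s3 (stval), s4 (scause) and s5 (the pre-trap tp, read back from sscratch).
 * In the live CSR, sstatus.SUM and sstatus.FS are now clear, so kernel code
 * that touches user memory or the FPU from here on will fault unless it
 * explicitly re-enables them.
 */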

/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_SSTATUS(sp)
	REG_L a2, PT_SEPC(sp)
	csrw CSR_SSTATUS, a0
	csrw CSR_SEPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)
	.endm

#if !IS_ENABLED(CONFIG_PREEMPT)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set sscratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SSCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	la ra, ret_from_exception
	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	tail do_IRQ
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on
	 * the state of sstatus.SR_SPIE.
	 */
	andi t0, s1, SR_SPIE
	beqz t0, 1f
	csrs CSR_SSTATUS, SR_SIE

1:
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_SEPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/* Syscall number held in a7 */
	bgeu a7, t0, 1f
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0
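
/*
 * Roughly, the dispatch above does (illustrative C, not generated code):
 *
 *	if (a7 < __NR_syscalls)
 *		a0 = sys_call_table[a7](a0, a1, a2, a3, a4, a5);
 *	else
 *		a0 = sys_ni_syscall();
 *
 * Execution then falls through to ret_from_syscall below, which writes the
 * return value back into the saved pt_regs so that RESTORE_ALL delivers it
 * to the user's a0.
 */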

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_SSTATUS(sp)
	csrc CSR_SSTATUS, SR_SIE
	andi s0, s0, SR_SPP
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into sscratch, so we can find the kernel data structures
	 * again.
	 */
	csrw CSR_SSCRATCH, tp

restore_all:
	RESTORE_ALL
	sret

#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
need_resched:
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j need_resched
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
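
/*
 * Both return paths above run on the new task's kernel stack, with ra
 * pointed at ret_from_exception.  Once schedule_tail() (and, for a kernel
 * thread, fn(arg), with fn in s0 and arg in s1 as set up by copy_thread())
 * returns, the task unwinds through the pt_regs frame at the top of its
 * kernel stack just like any other trap return.
 */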

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
	addi tp, a1, TASK_TI
#else
	move tp, a1
#endif
	ret
ENDPROC(__switch_to)

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
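
/*
 * This table is indexed from handle_exception: for non-interrupt traps,
 * scause is scaled by RISCV_LGPTR and added to excp_vect_table, and any
 * code that lands at or beyond excp_vect_table_end is routed to
 * do_trap_unknown.  The do_trap_ecall_u slot is normally never reached,
 * since ecall from U-mode (EXC_SYSCALL) is intercepted earlier and
 * dispatched through handle_syscall.
 */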