/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
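	/*
	 * s1 still holds the status CSR value saved at entry, so SR_PIE here
	 * is the pre-trap interrupt-enable bit: if interrupts were enabled
	 * when the exception hit, re-enable them before handling it.
	 */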
	andi t0, s1, SR_PIE
	beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force-enable interrupts here, because
	 * handle_exception did not set SR_IE as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* Save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The syscall number is held in a7. If it is above the allowed
	 * value, redirect to sys_ni_syscall.
	 */
	bgeu a7, t0, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp has already set the return value in the current task's
	 * pt_regs (if it was configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
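
/*
 * restore_all is shared by the user and kernel return paths: it reloads
 * every GPR from pt_regs, restores the status and EPC CSRs, and returns
 * from the trap with mret or sret.
 */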
restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
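/*
 * A non-zero return from do_syscall_trace_enter means the tracer or
 * seccomp rejected the syscall; it is then skipped and we branch to
 * ret_from_syscall_rejected instead of dispatching it.
 */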
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The values of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif