/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1,  PT_RA(sp)
	REG_S x3,  PT_GP(sp)
	REG_S x5,  PT_T0(sp)
	REG_S x6,  PT_T1(sp)
	REG_S x7,  PT_T2(sp)
	REG_S x8,  PT_S0(sp)
	REG_S x9,  PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
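	/*
	 * Note: handle_arch_irq is a global function pointer, normally
	 * installed by the interrupt controller driver via set_handle_irq().
	 * Because ra was pointed at ret_from_exception above, the C handler
	 * returns straight to the common exit path.
	 */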
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
	/* kprobes, entered via ebreak, must have interrupts disabled. */
	li t0, EXC_BREAKPOINT
	beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force-enable interrupts here because
	 * handle_exception did not set SR_IE, as it always sees SR_PIE being
	 * cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* Save the initial a0 value (needed in signal handlers). */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bgeu a7, t0, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set return value for the current task pt_regs.
	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
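
	/*
	 * restore_all is also reached on the return-to-kernel path: with
	 * CONFIG_PREEMPTION, resume_kernel branches here, and without it,
	 * resume_kernel is simply an alias for restore_all (see the .set at
	 * the top of this file). Work that must only happen on a return to
	 * userspace therefore belongs above this point.
	 */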
restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1,  PT_RA(sp)
	REG_L x3,  PT_GP(sp)
	REG_L x4,  PT_TP(sp)
	REG_L x5,  PT_T0(sp)
	REG_L x6,  PT_T1(sp)
	REG_L x7,  PT_T2(sp)
	REG_L x8,  PT_S0(sp)
	REG_L x9,  PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
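/*
 * do_syscall_trace_enter may rewrite the syscall arguments in pt_regs (a
 * tracer is allowed to modify them), which is why a0-a7 are reloaded from
 * the stack before the syscall number is re-checked; a nonzero return
 * value means the tracer or seccomp rejected the syscall, so it is not
 * executed.
 */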
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li    a4,  TASK_THREAD_RA
	add   a3, a0, a4
	add   a4, a1, a4
	REG_S ra,  TASK_THREAD_RA_RA(a3)
	REG_S sp,  TASK_THREAD_SP_RA(a3)
	REG_S s0,  TASK_THREAD_S0_RA(a3)
	REG_S s1,  TASK_THREAD_S1_RA(a3)
	REG_S s2,  TASK_THREAD_S2_RA(a3)
	REG_S s3,  TASK_THREAD_S3_RA(a3)
	REG_S s4,  TASK_THREAD_S4_RA(a3)
	REG_S s5,  TASK_THREAD_S5_RA(a3)
	REG_S s6,  TASK_THREAD_S6_RA(a3)
	REG_S s7,  TASK_THREAD_S7_RA(a3)
	REG_S s8,  TASK_THREAD_S8_RA(a3)
	REG_S s9,  TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra,  TASK_THREAD_RA_RA(a4)
	REG_L sp,  TASK_THREAD_SP_RA(a4)
	REG_L s0,  TASK_THREAD_S0_RA(a4)
	REG_L s1,  TASK_THREAD_S1_RA(a4)
	REG_L s2,  TASK_THREAD_S2_RA(a4)
	REG_L s3,  TASK_THREAD_S3_RA(a4)
	REG_L s4,  TASK_THREAD_S4_RA(a4)
	REG_L s5,  TASK_THREAD_S5_RA(a4)
	REG_L s6,  TASK_THREAD_S6_RA(a4)
	REG_L s7,  TASK_THREAD_S7_RA(a4)
	REG_L s8,  TASK_THREAD_S8_RA(a4)
	REG_L s9,  TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif