/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS

	/*
	 * Snapshot the trap CSRs into callee-saved registers before spilling
	 * them to pt_regs: s1 (status), s2 (epc) and s4 (cause) are consulted
	 * again after the C calls made further down.
	 */
	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
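	/*
	 * Note: s1 and s4 still hold the status and cause snapshots taken on
	 * entry; they are callee-saved, so the C calls made above (such as
	 * trace_hardirqs_off) preserve them.
	 */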
	/* kprobes, entered via ebreak, must have interrupts disabled. */
	li t0, EXC_BREAKPOINT
	beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:
	la ra, ret_from_exception
	/* Handle syscalls */
	li t0, EXC_SYSCALL
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force enable interrupts here because
	 * handle_exception did not set SR_IE as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* Save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)
	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter
check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * The syscall number is held in a7.
	 * If it is above the allowed range, redirect to sys_ni_syscall.
	 */
	bgeu a7, t0, 1f
	/* Call the syscall handler from sys_call_table */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
1:
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall.
	 * Seccomp already set the return value for the current task's pt_regs
	 * (if it was configured with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ret_from_exception:
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
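
	/*
	 * All return paths converge on restore_all: resume_userspace falls
	 * through from above, and resume_kernel branches back here (when
	 * CONFIG_PREEMPTION is disabled, resume_kernel is simply an alias
	 * for restore_all, set at the top of this file).
	 */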
restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call trace_hardirqs_on
	j 2f
1:
	call trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif

work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched
work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
work_resched:
	tail schedule

/* Slow paths for ptrace. */
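/*
 * do_syscall_trace_enter returns a nonzero value when the syscall must not
 * be executed (for example when it was rejected by the tracer or by
 * seccomp). The tracer may also have rewritten the argument registers, so
 * a0 - a7 are reloaded from pt_regs before the syscall number is rechecked.
 */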
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr
handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)
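
/*
 * Without an MMU there are no page faults, so the page-fault slots in the
 * exception vector table below fall back to the generic unknown-trap
 * handler.
 */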
#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
	/*
	 * rt_sigreturn trampoline for NOMMU kernels: copied onto the user
	 * signal frame, since there is no vDSO to return through.
	 */
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	scall
END(__user_rt_sigreturn)
#endif