/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
	move a0, sp /* pt_regs */
	la ra, ret_from_exception

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	tail do_irq
1:
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
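/*
 * Worked example of the dispatch above (illustrative only): on RV64,
 * RISCV_LGPTR is 3, so an illegal-instruction trap (cause 2) is scaled
 * to an offset of 2 << 3 = 16 bytes, i.e. the third RISCV_PTR entry of
 * excp_vect_table, which is do_trap_insn_illegal.
 */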

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
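	/*
	 * Illustration (a sketch, not code that runs here) of how the
	 * LR/SC-based CAS mentioned above can leave a dangling reservation:
	 *
	 *   1: lr.w  t0, (a0)      # take a reservation on (a0)
	 *      bne   t0, a1, 2f    # compare fails: branch forward around the SC
	 *      sc.w  t1, a2, (a0)  # skipped on the failing path
	 *      bnez  t1, 1b
	 *   2: ...                 # the reservation on (a0) may still be live
	 *
	 * The REG_L/REG_SC pair below issues a dummy store-conditional to the
	 * saved EPC slot, which clears any such reservation before we return
	 * to the new context.
	 */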
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* zero out x31 again and restore x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* save context to overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork)
	call schedule_tail
	beqz s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	la ra, ret_from_exception
	tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_CODE_START(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
SYM_CODE_END(excp_vect_table)

#ifndef CONFIG_MMU
SYM_CODE_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_CODE_END(__user_rt_sigreturn)
#endif