/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
	/*
	 * Check that pt_regs still fits on the kernel stack. This relies on
	 * VMAP'd kernel stacks being aligned to twice THREAD_SIZE, so that
	 * bit THREAD_SHIFT is clear for any in-bounds stack pointer and set
	 * once we have run off the bottom of the stack.
	 */
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel.
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop
	move a0, sp /* pt_regs */
	la ra, ret_from_exception

	/*
	 * The MSB of the cause register differentiates between
	 * interrupts and exceptions.
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	tail do_irq
1:
	/* Handle other exceptions */
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown
SYM_CODE_END(handle_exception)
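
/*
 * Note on the macros used above: REG_S/REG_L and save_from_x6_to_x31 come
 * from the asm headers and expand to XLEN-sized stores/loads, while the
 * CSR_STATUS/CSR_EPC/CSR_TVAL/CSR_CAUSE/CSR_SCRATCH names resolve to either
 * the S-mode or the M-mode CSRs depending on CONFIG_RISCV_M_MODE (see
 * asm/csr.h), so the same entry path serves both configurations.
 */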

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 * - handle_exception
 * - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for andi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_CODE_END(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* put 0 back in the scratch CSR and recover the original x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31

	addi sp, sp, -(PT_SIZE_ON_STACK)

	/* save context to the overflow stack */
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L s0, TASK_TI_KERNEL_SP(tp)
	csrr s1, CSR_STATUS
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	move a0, sp
	tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork)
	call schedule_tail
	beqz s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move a0, s1
	jalr s0
1:
	move a0, sp /* pt_regs */
	la ra, ret_from_exception
	tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)
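
/*
 * For ret_from_fork above, the child's callee-saved state is seeded by
 * copy_thread(): for a kernel thread, s0 holds the thread function and s1
 * its argument, and the saved ra points here so the first __switch_to()
 * to the new task returns into ret_from_fork. For a user task s0 is zero,
 * so the fn(arg) call is skipped and we go straight to
 * syscall_exit_to_user_mode().
 */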

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
	/* Exception vector table */
SYM_CODE_START(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
SYM_CODE_END(excp_vect_table)

#ifndef CONFIG_MMU
SYM_CODE_START(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
	ecall
SYM_CODE_END(__user_rt_sigreturn)
#endif
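
/*
 * The __user_rt_sigreturn stub above is only built on !CONFIG_MMU kernels,
 * where there is no vDSO to provide a sigreturn trampoline; the signal setup
 * code places this sequence where the signal handler returns to, so that
 * returning from a handler issues the rt_sigreturn system call.
 */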