// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>

#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/csr.h>
#include <asm/stacktrace.h>
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

extern asmlinkage void ret_from_fork(void);

void arch_cpu_idle(void)
{
	cpu_do_idle();
}

void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	if (!user_mode(regs))
		dump_backtrace(regs, NULL, KERN_DEFAULT);
}

#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;

bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}

static int __init compat_mode_detect(void)
{
	unsigned long tmp = csr_read(CSR_STATUS);

	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
	compat_mode_supported =
			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;

	csr_write(CSR_STATUS, tmp);

	pr_info("riscv: ELF compat mode %s",
			compat_mode_supported ? "supported" : "unsupported");

	return 0;
}
early_initcall(compat_mode_detect);
#endif

void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
{
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial value to the FP register
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}

void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
#endif
}

void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free the vector context of datap. */
	if (has_vector())
		kfree(tsk->thread.vstate.datap);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	/* clear entire V context, including datap for a new task */
	memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));

	return 0;
}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
	} else {
		*childregs = *(current_pt_regs());
		/* Turn off status.VS */
		riscv_v_vstate_off(childregs);
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.s[0] = 0;
	}
	p->thread.ra = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}