// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>

#include <asm/elf.h>
#include <abi/reg_ops.h>

struct cpuinfo_csky cpu_data[NR_CPUS];

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void){}

/*
 * Return saved PC from a blocked thread
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;

	return sw->r15;
}

int copy_thread(unsigned long clone_flags,
		unsigned long usp,
		unsigned long kthread_arg,
		struct task_struct *p)
{
	struct switch_stack *childstack;
	struct pt_regs *childregs = task_pt_regs(p);

#ifdef CONFIG_CPU_HAS_FPU
	save_to_user_fp(&p->thread.user_fp);
#endif

	childstack = ((struct switch_stack *) childregs) - 1;
	memset(childstack, 0, sizeof(struct switch_stack));

	/* Set up ksp for switch_to !!! */
	p->thread.ksp = (unsigned long)childstack;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: enter through ret_from_kernel_thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childstack->r15 = (unsigned long) ret_from_kernel_thread;
		childstack->r8 = kthread_arg;
		childstack->r9 = usp;
		childregs->sr = mfcr("psr");
	} else {
		*childregs = *(current_pt_regs());
		if (usp)
			childregs->usp = usp;
		if (clone_flags & CLONE_SETTLS)
			task_thread_info(p)->tp_value = childregs->tls
						      = childregs->regs[0];

		/* Child returns 0 from fork */
		childregs->a0 = 0;
		childstack->r15 = (unsigned long) ret_from_fork;
	}

	return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, struct user_fp *fpu)
{
	memcpy(fpu, &current->thread.user_fp, sizeof(*fpu));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	/* NOTE: usp is error value. */
	ELF_CORE_COPY_REGS((*pr_regs), regs)

	return 1;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long esp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	esp = p->thread.esp0;
	do {
		if (esp < stack_page + sizeof(struct task_struct) ||
		    esp >= 8184 + stack_page)
			return 0;
		/* FIXME: There may be an error here! */
		pc = ((unsigned long *)esp)[1];
		/* FIXME: This depends on the order of these functions. */
		if (!in_sched_functions(pc))
			return pc;
		esp = *(unsigned long *) esp;
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);

#ifndef CONFIG_CPU_PM_NONE
void arch_cpu_idle(void)
{
#ifdef CONFIG_CPU_PM_WAIT
	asm volatile("wait\n");
#endif

#ifdef CONFIG_CPU_PM_DOZE
	asm volatile("doze\n");
#endif

#ifdef CONFIG_CPU_PM_STOP
	asm volatile("stop\n");
#endif
	local_irq_enable();
}
#endif