/*
 *  linux/arch/m68k/kernel/process.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 *
 *  68060 fixes by Jesper Skov
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/rcupdate.h>

#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>


asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);


/*
 * Return saved PC from a blocked thread
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
        /* Check whether the thread is blocked in resume() */
        if (in_sched_functions(sw->retpc))
                return ((unsigned long *)sw->a6)[1];
        else
                return sw->retpc;
}

/*
 * The idle loop on an m68k.
 */
static void default_idle(void)
{
        if (!need_resched())
#if defined(MACH_ATARI_ONLY)
                /* block out HSYNC on the atari (falcon) */
                __asm__("stop #0x2200" : : : "cc");
#else
                __asm__("stop #0x2000" : : : "cc");
#endif
}

void (*idle)(void) = default_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                rcu_idle_enter();
                while (!need_resched())
                        idle();
                rcu_idle_exit();
                schedule_preempt_disabled();
        }
}

void machine_restart(char * __unused)
{
        if (mach_reset)
                mach_reset();
        for (;;);
}

void machine_halt(void)
{
        if (mach_halt)
                mach_halt();
        for (;;);
}

void machine_power_off(void)
{
        if (mach_power_off)
                mach_power_off();
        for (;;);
}

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("Format %02x  Vector: %04x  PC: %08lx  Status: %04x  %s\n",
               regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
        printk("ORIG_D0: %08lx  D0: %08lx  A2: %08lx  A1: %08lx\n",
               regs->orig_d0, regs->d0, regs->a2, regs->a1);
        printk("A0: %08lx  D5: %08lx  D4: %08lx\n",
               regs->a0, regs->d5, regs->d4);
        printk("D3: %08lx  D2: %08lx  D1: %08lx\n",
               regs->d3, regs->d2, regs->d1);
        if (!(regs->sr & PS_S))
                printk("USP: %08lx\n", rdusp());
}

void flush_thread(void)
{
        current->thread.fs = __USER_DS;
#ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
                unsigned long zero = 0;
                asm volatile("frestore %0": :"m" (zero));
        }
#endif
}
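
/*
 * A rough picture of the clone() register convention assumed by
 * m68k_clone() below; the mapping mirrors its do_fork() call:
 *
 *      d1 = clone_flags
 *      d2 = child's new stack pointer (0 means "inherit the parent's")
 *      d3 = parent_tidptr
 *      d4 = child_tidptr
 *
 * The syscall entry path has already saved these in pt_regs, which is
 * why the wrapper can recover them without explicit argument passing.
 */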

/*
 * Why not generic sys_clone, you ask?  m68k passes all arguments on stack.
 * And we need all registers saved, which means a bunch of stuff pushed
 * on top of pt_regs, which means that sys_clone() arguments would be
 * buried.  We could, of course, copy them, but it's too costly for no
 * good reason - generic clone() would have to copy them *again* for
 * do_fork() anyway.  So in this case it's actually better to pass pt_regs *
 * and extract arguments for do_fork() from there.  Eventually we might
 * go for calling do_fork() directly from the wrapper, but only after we
 * are finished with do_fork() prototype conversion.
 */
asmlinkage int m68k_clone(struct pt_regs *regs)
{
        /* regs will be equal to current_pt_regs() */
        return do_fork(regs->d1, regs->d2, 0,
                       (int __user *)regs->d3, (int __user *)regs->d4);
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *p)
{
        struct fork_frame {
                struct switch_stack sw;
                struct pt_regs regs;
        } *frame;

        frame = (struct fork_frame *) (task_stack_page(p) + THREAD_SIZE) - 1;

        p->thread.ksp = (unsigned long)frame;
        p->thread.esp0 = (unsigned long)&frame->regs;

        /*
         * Must save the current SFC/DFC value, NOT the value when
         * the parent was last descheduled - RGH 10-08-96
         */
        p->thread.fs = get_fs().seg;

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(frame, 0, sizeof(struct fork_frame));
                frame->regs.sr = PS_S;
                frame->sw.a3 = usp; /* function */
                frame->sw.d7 = arg;
                frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
                p->thread.usp = 0;
                return 0;
        }
        memcpy(frame, container_of(current_pt_regs(), struct fork_frame, regs),
               sizeof(struct fork_frame));
        frame->regs.d0 = 0;
        frame->sw.retpc = (unsigned long)ret_from_fork;
        p->thread.usp = usp ?: rdusp();

        if (clone_flags & CLONE_SETTLS)
                task_thread_info(p)->tp_value = frame->regs.d5;

#ifdef CONFIG_FPU
        if (!FPU_IS_EMU) {
                /* Copy the current fpu state */
                asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");

                if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
                        if (CPU_IS_COLDFIRE) {
                                asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
                                              "fmovel %/fpiar,%1\n\t"
                                              "fmovel %/fpcr,%2\n\t"
                                              "fmovel %/fpsr,%3"
                                              :
                                              : "m" (p->thread.fp[0]),
                                                "m" (p->thread.fpcntl[0]),
                                                "m" (p->thread.fpcntl[1]),
                                                "m" (p->thread.fpcntl[2])
                                              : "memory");
                        } else {
                                asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
                                              "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
                                              :
                                              : "m" (p->thread.fp[0]),
                                                "m" (p->thread.fpcntl[0])
                                              : "memory");
                        }
                }

                /* Restore the state in case the fpu was busy */
                asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
        }
#endif /* CONFIG_FPU */

        return 0;
}
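
/*
 * A minimal sketch of what copy_thread() above leaves at the top of the
 * child's kernel stack, assuming the usual m68k fork_frame layout:
 *
 *      task_stack_page(p) + THREAD_SIZE --->   (top of stack)
 *                                              +--------------+
 *              p->thread.esp0 ------------->   |   pt_regs    |
 *                                              +--------------+
 *              p->thread.ksp (== frame) --->   | switch_stack |
 *                                              +--------------+
 *
 * When the scheduler first switches to the child, resume() reloads
 * p->thread.ksp, pops the switch_stack and "returns" through sw.retpc:
 * into ret_from_fork for a user fork (d0 was zeroed above, so the child
 * sees clone() return 0), or into ret_from_kernel_thread for a kernel
 * thread (which calls the function in a3 with the argument in d7).
 */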

/* Fill in the fpu structure for a core dump.  */
#ifdef CONFIG_FPU
int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
        char fpustate[216];

        if (FPU_IS_EMU) {
                int i;

                memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
                memcpy(fpu->fpregs, current->thread.fp, 96);
                /* Convert internal fpu reg representation
                 * into long double format
                 */
                for (i = 0; i < 24; i += 3)
                        fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
                                         ((fpu->fpregs[i] & 0x0000ffff) << 16);
                return 1;
        }

        /* First dump the fpu context to avoid protocol violation.  */
        asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
        if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
                return 0;

        if (CPU_IS_COLDFIRE) {
                asm volatile ("fmovel %/fpiar,%0\n\t"
                              "fmovel %/fpcr,%1\n\t"
                              "fmovel %/fpsr,%2\n\t"
                              "fmovemd %/fp0-%/fp7,%3"
                              :
                              : "m" (fpu->fpcntl[0]),
                                "m" (fpu->fpcntl[1]),
                                "m" (fpu->fpcntl[2]),
                                "m" (fpu->fpregs[0])
                              : "memory");
        } else {
                asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
                              :
                              : "m" (fpu->fpcntl[0])
                              : "memory");
                asm volatile ("fmovemx %/fp0-%/fp7,%0"
                              :
                              : "m" (fpu->fpregs[0])
                              : "memory");
        }

        return 1;
}
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_FPU */

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, pc;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)task_stack_page(p);
        fp = ((struct switch_stack *)p->thread.ksp)->a6;
        do {
                if (fp < stack_page+sizeof(struct thread_info) ||
                    fp >= 8184+stack_page)
                        return 0;
                pc = ((unsigned long *)fp)[1];
                if (!in_sched_functions(pc))
                        return pc;
                fp = *(unsigned long *) fp;
        } while (count++ < 16);
        return 0;
}
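
/*
 * For reference, a rough sketch of the a6 frame chain that both
 * thread_saved_pc() and get_wchan() above walk.  Assuming each function
 * on the path uses a standard "link %a6" prologue, a frame looks like:
 *
 *      (a6)    caller's saved a6       fp = *(unsigned long *)fp
 *      4(a6)   return address          pc = ((unsigned long *)fp)[1]
 *
 * so the walk simply follows saved frame pointers up the stack, bounded
 * by the stack-page check and the 16-iteration limit, until it finds a
 * return address outside the scheduler functions.
 */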