/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>

/*
 * We use this if we don't have any better idle routine.
 * (Still to kill: kernel/platform.c.)
 */
void default_idle(void)
{
}

/*
 * The idle thread.  There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule).
 */
ATTRIB_NORET void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched())
                        if (cpu_wait)
                                (*cpu_wait)();
                schedule();
        }
}
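/*
 * cpu_wait is a platform-supplied hook, installed by the CPU probe code.
 * A hedged sketch of what a platform might install (hypothetical routine
 * name, modelled on the R4000-style "wait" instruction):
 *
 *      static void r4k_wait(void)
 *      {
 *              __asm__(".set\tmips3\n\t"
 *                      "wait\n\t"
 *                      ".set\tmips0");
 *      }
 *      ...
 *      cpu_wait = r4k_wait;
 */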
asmlinkage void ret_from_fork(void);

void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
#ifdef CONFIG_MIPS64
        status &= ~ST0_FR;
        status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
#endif
        status |= KU_USER;
        regs->cp0_status = status;
        clear_used_math();
        lose_fpu();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
        current_thread_info()->addr_limit = USER_DS;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = p->thread_info;
        struct pt_regs *childregs;
        long childksp;

        childksp = (unsigned long)ti + THREAD_SIZE - 32;

        preempt_disable();

        if (is_fpu_owner())
                save_fp(p);

        preempt_enable();

        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */

#if defined(CONFIG_BINFMT_IRIX)
        if (current->personality != PER_LINUX) {
                /*
                 * Under IRIX things are a little different: the child
                 * gets 1 in v1 ($3), the parent gets 0.
                 */
                childregs->regs[3] = 1;
                regs->regs[3] = 0;
        }
#endif
        childregs->regs[2] = 0; /* Child gets zero as return value */
        regs->regs[2] = p->pid;

        if (childregs->cp0_status & ST0_CU0) {
                childregs->regs[28] = (unsigned long) ti;
                childregs->regs[29] = childksp;
                ti->addr_limit = KERNEL_DS;
        } else {
                childregs->regs[29] = usp;
                ti->addr_limit = USER_DS;
        }
        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu.  This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
        clear_tsk_thread_flag(p, TIF_USEDFPU);

        return 0;
}

/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}

void dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < EF_R0; i++)
                gp[i] = 0;
        gp[EF_R0] = 0;
        for (i = 1; i <= 31; i++)
                gp[EF_R0 + i] = regs->regs[i];
        gp[EF_R26] = 0;
        gp[EF_R27] = 0;
        gp[EF_LO] = regs->lo;
        gp[EF_HI] = regs->hi;
        gp[EF_CP0_EPC] = regs->cp0_epc;
        gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        gp[EF_CP0_STATUS] = regs->cp0_status;
        gp[EF_CP0_CAUSE] = regs->cp0_cause;
#ifdef EF_UNUSED0
        gp[EF_UNUSED0] = 0;
#endif
}

int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
        memcpy(fpr, &t->thread.fpu, sizeof(t->thread.fpu));

        return 1;
}

/*
 * Create a kernel thread
 */
ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
{
        do_exit(fn(arg));
}

long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.regs[4] = (unsigned long) arg;
        regs.regs[5] = (unsigned long) fn;
        regs.cp0_epc = (unsigned long) kernel_thread_helper;
        regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        regs.cp0_status &= ~(ST0_KUP | ST0_IEC);
        regs.cp0_status |= ST0_IEP;
#else
        regs.cp0_status |= ST0_EXL;
#endif

        /* OK, create the new process. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
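/*
 * The frame-info machinery below scans a function's prologue for the
 * stores that save $ra and $s8 on the stack.  For reference, a typical
 * prologue it can parse looks roughly like this (illustrative offsets
 * only; MIPS64 uses daddiu/sd instead):
 *
 *      addiu   $sp, $sp, -56           # allocate stack frame
 *      sw      $ra, 48($sp)            # -> pc_offset = 48 / sizeof(long)
 *      sw      $s8, 44($sp)            # -> frame_offset = 44 / sizeof(long)
 */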
struct mips_frame_info {
        int frame_offset;
        int pc_offset;
};
static struct mips_frame_info schedule_frame;
static struct mips_frame_info schedule_timeout_frame;
static struct mips_frame_info sleep_on_frame;
static struct mips_frame_info sleep_on_timeout_frame;
static struct mips_frame_info wait_for_completion_frame;
static int mips_frame_info_initialized;
static int __init get_frame_info(struct mips_frame_info *info, void *func)
{
        int i;
        union mips_instruction *ip = (union mips_instruction *)func;

        info->pc_offset = -1;
        info->frame_offset = -1;
        for (i = 0; i < 128; i++, ip++) {
                /* If we hit a jal, jalr or jr, the prologue has ended; stop. */
                if (ip->j_format.opcode == jal_op ||
                    (ip->r_format.opcode == spec_op &&
                     (ip->r_format.func == jalr_op ||
                      ip->r_format.func == jr_op)))
                        break;

                if (
#ifdef CONFIG_MIPS32
                    ip->i_format.opcode == sw_op &&
#endif
#ifdef CONFIG_MIPS64
                    ip->i_format.opcode == sd_op &&
#endif
                    ip->i_format.rs == 29) {
                        /* sw / sd $ra, offset($sp) */
                        if (ip->i_format.rt == 31) {
                                if (info->pc_offset != -1)
                                        break;
                                info->pc_offset =
                                        ip->i_format.simmediate / sizeof(long);
                        }
                        /* sw / sd $s8, offset($sp) */
                        if (ip->i_format.rt == 30) {
                                if (info->frame_offset != -1)
                                        break;
                                info->frame_offset =
                                        ip->i_format.simmediate / sizeof(long);
                        }
                }
        }
        if (info->pc_offset == -1 || info->frame_offset == -1) {
                printk("Can't analyze prologue code at %p\n", func);
                info->pc_offset = -1;
                info->frame_offset = -1;
                return -1;
        }

        return 0;
}

static int __init frame_info_init(void)
{
        mips_frame_info_initialized =
                !get_frame_info(&schedule_frame, schedule) &&
                !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
                !get_frame_info(&sleep_on_frame, sleep_on) &&
                !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
                !get_frame_info(&wait_for_completion_frame, wait_for_completion);

        return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* Newborn processes are a special case */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;

        if (schedule_frame.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_frame.pc_offset];
}
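/*
 * Sketch of the call chains get_wchan() below has to untangle (as implied
 * by the address-ordering tests in the function body):
 *
 *      caller -> [interruptible_]sleep_on         -> schedule
 *      caller -> wait_for_completion              -> schedule
 *      caller -> [interruptible_]sleep_on_timeout -> schedule_timeout -> schedule
 *
 * Each hop back up the stack indexes a saved frame pointer with the
 * pc_offset/frame_offset pairs recorded at boot by frame_info_init().
 */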
/* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long frame, pc;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        if (!mips_frame_info_initialized)
                return 0;
        pc = thread_saved_pc(p);
        if (!in_sched_functions(pc))
                goto out;

        if (pc >= (unsigned long) sleep_on_timeout)
                goto schedule_timeout_caller;
        if (pc >= (unsigned long) sleep_on)
                goto schedule_caller;
        if (pc >= (unsigned long) interruptible_sleep_on_timeout)
                goto schedule_timeout_caller;
        if (pc >= (unsigned long) interruptible_sleep_on)
                goto schedule_caller;
        if (pc >= (unsigned long) wait_for_completion)
                goto schedule_caller;
        goto schedule_timeout_caller;

schedule_caller:
        frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
        if (pc >= (unsigned long) sleep_on)
                pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
        else
                pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
        goto out;

schedule_timeout_caller:
        /*
         * The schedule_timeout frame
         */
        frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];

        /*
         * frame now points to sleep_on_timeout's frame
         */
        pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];

        if (in_sched_functions(pc)) {
                /* schedule_timeout called by [interruptible_]sleep_on_timeout */
                frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
                pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
        }

out:

#ifdef CONFIG_MIPS64
        if (current->thread.mflags & MF_32BIT_REGS)     /* Kludge for 32-bit ps */
                pc &= 0xffffffffUL;
#endif

        return pc;
}

EXPORT_SYMBOL(get_wchan);
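/*
 * Consumer note: fs/proc uses get_wchan() to generate /proc/<pid>/wchan,
 * which tools such as "ps -o wchan" resolve to a symbol name.  A rough
 * sketch of the proc-side call (not defined in this file):
 *
 *      unsigned long wchan = get_wchan(task);
 *      ...look wchan up in the kernel symbol table...
 */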