/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>

#include "kstack.h"

/* Idle this cpu.  On sun4v (tlb_type == hypervisor) we park the
 * virtual cpu in the hypervisor via sun4v_cpu_yield() until there is
 * work to do; on older chips we just poke the NMI watchdog and return
 * so the caller's polling loop in cpu_idle() keeps spinning.
 */
static void sparc64_yield(int cpu)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		return;
	}

	/* We are about to sleep in the hypervisor, so stop advertising
	 * ourselves as a polling idle cpu; from here on the scheduler
	 * must wake us with a resched interrupt.
	 */
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb__after_clear_bit();

	while (!need_resched() && !cpu_is_offline(cpu)) {
		unsigned long pstate;

		/* Disable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		/* Re-check with interrupts off to close the race between
		 * the loop test above and actually yielding the cpu.
		 */
		if (!need_resched() && !cpu_is_offline(cpu))
			sun4v_cpu_yield();

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}

	set_thread_flag(TIF_POLLING_NRFLAG);
}

/* The idle loop on sparc64.  Stops the periodic tick while idle
 * (nohz), yields the cpu until work arrives, and on CONFIG_HOTPLUG_CPU
 * lets an offlined cpu enter cpu_play_dead().
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	while(1) {
		tick_nohz_stop_sched_tick(1);

		while (!need_resched() && !cpu_is_offline(cpu))
			sparc64_yield(cpu);

		tick_nohz_restart_sched_tick();

		preempt_enable_no_resched();

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(cpu))
			cpu_play_dead();
#endif

		schedule();
		preempt_disable();
	}
}

#ifdef CONFIG_COMPAT
/* Dump the 32-bit user register window that %sp (u_regs[14]) points
 * at.  The window is fetched from user memory under set_fs(USER_DS);
 * if it cannot be read we silently print nothing.
 */
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	/* Force the current register windows out to the stack first. */
	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}

	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs) do { } while (0)
#endif

/* Dump the register window at %sp for a 64-bit frame.  Privileged
 * (kernel) frames are read directly off the kernel stack through rwk;
 * user frames are copied in under set_fs(USER_DS) first.  32-bit user
 * tasks are handed off to show_regwindow32().
 */
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		/* 64-bit frames store %sp biased by STACK_BIAS. */
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rwk = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	/* For kernel frames also symbolize the saved return address. */
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

/* Print the full trap register state plus the register window. */
void show_regs(struct pt_regs *regs)
{
	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
}

/* Per-cpu register snapshots filled in for the sysrq-y global
 * backtrace; serialized by global_reg_snapshot_lock.
 */
struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_reg_snapshot_lock);

/* Record this cpu's trap register state into global_reg_snapshot[].
 * For privileged traps we additionally walk up one stack frame
 * (validated with kstack_valid()) to capture the caller PCs into
 * i7/rpc.  The thread pointer is stored LAST: remote pollers use its
 * becoming non-NULL as the "snapshot complete" signal.
 */
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	flushw_all();

	global_reg_snapshot[this_cpu].tstate = regs->tstate;
	global_reg_snapshot[this_cpu].tpc = regs->tpc;
	global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
	global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			global_reg_snapshot[this_cpu].i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				global_reg_snapshot[this_cpu].rpc = rw->ins[7];
		}
	} else {
		global_reg_snapshot[this_cpu].i7 = 0;
		global_reg_snapshot[this_cpu].rpc = 0;
	}
	global_reg_snapshot[this_cpu].thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus. The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	/* Bounded wait (at most ~100us) so a wedged cpu cannot hang us. */
	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

/* Dump register state for every online cpu: snapshot ourselves,
 * ask the other cpus to fill their global_reg_snapshot[] slots via
 * smp_fetch_global_regs(), then print each slot.  The snapshot array
 * is cleared before and after so a later dump never sees stale data.
 */
void arch_trigger_all_cpu_backtrace(void)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	/* Not in an interrupt: fall back to our saved kernel regs. */
	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_reg_snapshot_lock, flags);

	memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_online_cpu(cpu) {
		struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			/* Kernel mode: symbolize the captured PCs. */
			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}
	}

	memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));

	spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

/* sysrq-y handler: dump the global cpu register state. */
static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
	arch_trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "Globalregs",
	.action_msg	= "Show Global CPU Regs",
};

static int __init sparc_globreg_init(void)
{
	return register_sysrq_key('y', &sparc_globalreg_op);
}

core_initcall(sparc_globreg_init);

#endif

/* Return the PC a sleeping task will resume at, for /proc wchan-style
 * reporting.  Walks one frame up the saved kernel stack: slot 14 of
 * the window is the saved frame pointer, slot 15 the saved return
 * address.  Returns the 0xdeadbeef sentinel if the stack looks bogus
 * (misaligned pointers or no saved state).
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long ret = 0xdeadbeefUL;

	if (ti && ti->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}

/* Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

	/* utraps[0] acts as a reference count on the utrap table:
	 * free it on the last user, otherwise just drop a reference.
	 */
	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}

	/* Tear down user performance-counter state if it was active. */
	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}
}

/* Reset thread state across exec.  TIF_ABI_PENDING (set by the ELF
 * loader when the new image differs in word size) flips the TIF_32BIT
 * personality here.
 */
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	/* No register windows saved for the (new) user stack. */
	set_thread_wsaved(0);

	/* Turn off performance counters if on. */
	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;

	if (get_thread_current_ds() != ASI_AIUS)
		set_fs(USER_DS);
}

/* It's a bit more tricky when 64-bit tasks are involved...
 * Copy the parent's topmost user stack frame (from psp) onto the
 * child's new stack (csp) so the child's window restore works, and
 * fix up the copied frame's saved %fp.  Returns the child's new
 * stack pointer (STACK_BIAS-adjusted for 64-bit tasks), or 0 on fault.
 */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	unsigned long fp, distance, rval;

	if (!(test_thread_flag(TIF_32BIT))) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now 8-byte align the stack as this is mandatory in the
	 * Sparc ABI due to how register windows work. This hides
	 * the restriction from thread libraries etc. -DaveM
	 */
	csp &= ~7UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (test_thread_flag(TIF_32BIT)) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff.  Slide the buffered windows [first_win+1, last_win]
 * down by one slot, dropping entry first_win.
 */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

/* Best-effort flush of kernel-buffered user register windows out to
 * the user stack.  Windows that cannot be written (bad stack page)
 * simply stay buffered; no signal is raised here.
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		int winsize = sizeof(struct reg_window);
		int bias = 0;

		if (test_thread_flag(TIF_32BIT))
			winsize = sizeof(struct reg_window32);
		else
			bias = STACK_BIAS;

		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

/* Raise SIGBUS/BUS_ADRALN for a misaligned user stack pointer. */
static void stack_unaligned(unsigned long sp)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) sp;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

/* Mandatory flush of buffered user windows to the user stack (used on
 * return paths that must not keep windows in the kernel).  Unlike
 * synchronize_user_stack(), failure here is fatal: the task is killed
 * with SIGILL, after recording how many windows remain buffered.
 */
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;
	int winsize = sizeof(struct reg_window);
	int bias = 0;

	if (test_thread_flag(TIF_32BIT))
		winsize = sizeof(struct reg_window32);
	else
		bias = STACK_BIAS;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize)))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}

/* Common fork/vfork/clone entry from assembly.  The parent/child TID
 * pointers live in %i2/%i4; for 32-bit tasks they must go through
 * compat_ptr() to strip the upper bits.
 */
asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start,
		      regs, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}

/* Copy a Sparc thread.
 * The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 *   Parent --> %o0 == childs pid, %o1 == 0
 *   Child  --> %o0 == parents pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *t = task_thread_info(p);
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;
	int kernel_thread;

	/* A privileged tstate means we are forking a kernel thread. */
	kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0;
	parent_sf = ((struct sparc_stackf *) regs) - 1;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) +
			  (kernel_thread ? STACKFRAME_SZ : 0));
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));
	memcpy(child_trap_frame, parent_sf, child_stack_sz);

	/* Seed the child's CWP from the trap tstate; the current
	 * address-space selector (DS) bits are filled in below.
	 */
	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) |
				 (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (kernel_thread) {
		struct sparc_stackf *child_sf = (struct sparc_stackf *)
			(child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ));

		/* Zero terminate the stack backtrace. */
		child_sf->fp = NULL;
		t->kregs->u_regs[UREG_FP] =
		  ((unsigned long) child_sf) - STACK_BIAS;

		/* Special case, if we are spawning a kernel thread from
		 * a userspace task (usermode helper, NFS or similar), we
		 * must disable performance counters in the child because
		 * the address space and protection realm are changing.
		 */
		if (t->flags & _TIF_PERFCTR) {
			t->user_cntd0 = t->user_cntd1 = NULL;
			t->pcr_reg = 0;
			t->flags &= ~_TIF_PERFCTR;
		}
		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
	} else {
		if (t->flags & _TIF_32BIT) {
			sp &= 0x00000000ffffffffUL;
			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
		}
		t->kregs->u_regs[UREG_FP] = sp;
		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
		/* A different stack pointer means clone() with a new
		 * user stack: copy the parent's top frame over to it.
		 */
		if (sp != regs->u_regs[UREG_FP]) {
			unsigned long csp;

			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
			if (!csp)
				return -EFAULT;
			t->kregs->u_regs[UREG_FP] = csp;
		}
		/* Child shares the utrap table; bump its refcount. */
		if (t->utraps)
			t->utraps[0]++;
	}

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 */
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	/* If the parent runs before fn(arg) is called by the child,
	 * the input registers of this function can be clobbered.
	 * So we stash 'fn' and 'arg' into global registers which
	 * will not be modified by the parent.
	 */
	__asm__ __volatile__("mov %4, %%g2\n\t"	   /* Save FN into global */
			     "mov %5, %%g3\n\t"	   /* Save ARG into global */
			     "mov %1, %%g1\n\t"	   /* Clone syscall nr. */
			     "mov %2, %%o0\n\t"	   /* Clone flags. */
			     "mov 0, %%o1\n\t"	   /* usp arg == 0 */
			     "t 0x6d\n\t"	   /* Linux/Sparc clone(). */
			     "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
			     " mov %%o0, %0\n\t"
			     "jmpl %%g2, %%o7\n\t" /* Call the function. */
			     " mov %%g3, %%o0\n\t" /* Set arg in delay. */
			     "mov %3, %%g1\n\t"
			     "t 0x6d\n\t"	   /* Linux/Sparc exit(). */
			     /* Notreached by child. */
			     "1:" :
			     "=r" (retval) :
			     "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
			     "i" (__NR_exit),  "r" (fn), "r" (arg) :
			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
	return retval;
}
EXPORT_SYMBOL(kernel_thread);

/* 32-bit (SunOS-compatible) FPU register set layout used when core
 * dumping a compat task.
 */
typedef struct {
	union {
		unsigned int	pr_regs[32];
		unsigned long	pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int	pr_fsr;
	unsigned char	pr_qcnt;
	unsigned char	pr_q_entrysize;
	unsigned char	pr_en;
	unsigned int	pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: emit the 32-bit layout (lower 32 fp
		 * registers only, plus an empty fp queue).
		 */
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		/* 64-bit task: lower half (FPRS_DL) and upper half
		 * (FPRS_DU) of the register file are dumped, or zeroed,
		 * independently.
		 */
		if(fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if(fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
	int error, base = 0;
	char *filename;

	/* User register window flush is done by entry.S */

	/* Check for indirect call. */
	if (regs->u_regs[UREG_G1] == 0)
		base = 1;

	filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			  (char __user * __user *)
			  regs->u_regs[base + UREG_I1],
			  (char __user * __user *)
			  regs->u_regs[base + UREG_I2], regs);
	putname(filename);
	if (!error) {
		/* Successful exec: discard the old image's FPU state
		 * and disable the FPU (TSTATE_PEF) for the new one.
		 */
		fprs_write(0);
		current_thread_info()->xfsr[0] = 0;
		current_thread_info()->fpsaved[0] = 0;
		regs->tstate &= ~TSTATE_PEF;
	}
out:
	return error;
}

/* Return the first PC outside the scheduler on a sleeping task's
 * kernel stack, walking saved frame pointers (validated by
 * kstack_valid()) for at most 16 frames.  Returns 0 for the current
 * or a running task, or when nothing suitable is found.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}