/* arch/sparc64/kernel/process.c
 *
 * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
 * Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
	} else {
		unsigned long pstate;

		/* The sun4v sleeping code requires that we have PSTATE.IE
		 * cleared over the cpu sleep hypervisor call.  Otherwise an
		 * interrupt slipping in after the need_resched() test below
		 * could be serviced before we yield, and we would go to
		 * sleep having just missed our wakeup.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
			sun4v_cpu_yield();

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
	local_irq_enable();
}
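/* With CPU hotplug, a CPU that has been taken offline falls out of
 * the generic idle loop and is parked here.  Preemption is re-enabled
 * without a resched pass first, since a dead CPU is never going to
 * schedule again.
 */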
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif

#ifdef CONFIG_COMPAT
/* Dump the 32-bit register window that a compat task's stack pointer
 * (%o6, u_regs[14]) points at.
 */
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs(USER_DS);
	if (copy_from_user(&r_w, rw, sizeof(r_w))) {
		set_fs(old_fs);
		return;
	}

	set_fs(old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif

/* Dump the register window the trap-time stack pointer points at.
 * Privileged state can be read straight off the kernel stack; user
 * state has to be copied in from userspace.
 */
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			old_fs = get_fs();
			set_fs(USER_DS);
			if (copy_from_user(&r_w, rw, sizeof(r_w))) {
				set_fs(old_fs);
				return;
			}
			rwk = &r_w;
			set_fs(old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}
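/* Snapshot state used by the "dump all CPUs" paths below.  A dump is
 * serialized by global_cpu_snapshot_lock; each remote CPU fills in its
 * own slot asynchronously, so the initiating CPU polls the slots for
 * completion rather than blocking on them (see __global_reg_poll()).
 */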
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

/* Record this cpu's register state into its snapshot slot.  The
 * thread pointer is stored last so that pollers can treat it as the
 * "this slot is complete" flag.
 */
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

void arch_trigger_all_cpu_backtrace(void)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_online_cpu(cpu) {
		struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
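/* Wire the dumps up to magic-sysrq: 'y' triggers the global register
 * dump above, and 'x' captures each CPU's performance counter state
 * (PCR and PIC registers) through the same snapshot area.
 */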
#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	arch_trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};

static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}

static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);

#endif
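/* Best-effort "where was this sleeping task" helper: follow the task's
 * saved kernel stack pointer to the caller's frame and pull out its
 * return address.  0xdeadbeef is returned if the stack does not look
 * sane.
 */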
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long ret = 0xdeadbeefUL;

	if (ti && ti->ksp) {
		unsigned long *sp;
		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
		    sp[14]) {
			unsigned long *fp;
			fp = (unsigned long *)(sp[14] + STACK_BIAS);
			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
				ret = fp[15];
		}
	}
	return ret;
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

	if (t->utraps) {
		/* utraps[0] is a reference count (the table is shared
		 * across clones); free the table only when the last
		 * user exits.
		 */
		if (t->utraps[0] < 2)
			kfree(t->utraps);
		else
			t->utraps[0]--;
	}
}

void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}

/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}

/* Push any register windows still buffered in thread_info out to the
 * user stack; a window that cannot be written (e.g. because its stack
 * page is unmapped) simply stays buffered.
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}

static void stack_unaligned(unsigned long sp)
{
	siginfo_t info;

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) sp;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
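/* Stronger variant of synchronize_user_stack(), used when buffered
 * windows must reach the user stack (e.g. on the way back out to
 * userspace): an unaligned stack pointer raises SIGBUS, and a window
 * that still cannot be written out kills the task with SIGILL.
 */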
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize)))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}

asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	ret = do_fork(clone_flags, stack_start, stack_size,
		      parent_tid_ptr, child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.  (The unsigned comparison catches exactly the small
	 * negative errno values, -ERESTART_RESTARTBLOCK being the
	 * largest in magnitude.)
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == child's  pid, %o1 == 0
 * Child  -->  %o0 == parent's pid, %o1 == 1
 */
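/* Userland typically folds the two-register convention back into the
 * single return value of fork(), roughly:
 *
 *	pid = %o1 ? 0 : %o0;
 *
 * so the child sees 0 and the parent sees the child's pid.
 */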
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}

/* 32-bit layout of the FPU state, as dumped for compat tasks below. */
typedef struct {
	union {
		unsigned int pr_regs[32];
		unsigned long pr_dregs[16];
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;
	unsigned char pr_qcnt;
	unsigned char pr_q_entrysize;
	unsigned char pr_en;
	unsigned int pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	unsigned long *kfpregs = current_thread_info()->fpregs;
	unsigned long fprs = current_thread_info()->fpsaved[0];

	if (test_thread_flag(TIF_32BIT)) {
		elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

		if (fprs & FPRS_DL)
			memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs32->pr_fr.pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		fpregs32->pr_qcnt = 0;
		fpregs32->pr_q_entrysize = 8;
		memset(&fpregs32->pr_q[0], 0,
		       (sizeof(unsigned int) * 64));
		if (fprs & FPRS_FEF) {
			fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
			fpregs32->pr_en = 1;
		} else {
			fpregs32->pr_fsr = 0;
			fpregs32->pr_en = 0;
		}
	} else {
		if (fprs & FPRS_DL)
			memcpy(&fpregs->pr_regs[0], kfpregs,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[0], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_DU)
			memcpy(&fpregs->pr_regs[16], kfpregs+16,
			       sizeof(unsigned int) * 32);
		else
			memset(&fpregs->pr_regs[16], 0,
			       sizeof(unsigned int) * 32);
		if (fprs & FPRS_FEF) {
			fpregs->pr_fsr = current_thread_info()->xfsr[0];
			fpregs->pr_gsr = current_thread_info()->gsr[0];
		} else {
			fpregs->pr_fsr = fpregs->pr_gsr = 0;
		}
		fpregs->pr_fprs = fprs;
	}
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

/* Find what a sleeping task is waiting on: walk its saved stack
 * frames (at most 16 of them) until we find a return address outside
 * of the scheduler.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}