/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DAC and struct entries.  One shot trigger */
#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
							| DBCR0_IDM));
#endif

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}

static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	mtspr(SPRN_DABR, dabr);
#endif

#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DAC1, dabr);
#endif

	return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);

#if defined(CONFIG_BOOKE)
	/* If new thread DAC (HW breakpoint) is the same then leave it */
	if (new->thread.dabr)
		set_dabr(new->thread.dabr);
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_ME,	"ME"},
	{MSR_CE,	"CE"},
	{MSR_DE,	"DE"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}
#endif

	discard_lazy_cpu_state();

	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);

#if defined(CONFIG_BOOKE)
		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
	}
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000;	/* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.
	 * Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	putname(filename);
out:
	return error;
}

#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

#else
#define valid_irq_stack(sp, p, nb)	0
#endif /* CONFIG_IRQSTACKS */

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long addr = (unsigned long)return_to_handler;
#ifdef CONFIG_PPC64
	addr = *(unsigned long*)addr;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if (ip == addr && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}