1 /* 2 * Derived from "arch/i386/kernel/process.c" 3 * Copyright (C) 1995 Linus Torvalds 4 * 5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 6 * Paul Mackerras (paulus@cs.anu.edu.au) 7 * 8 * PowerPC version 9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License 13 * as published by the Free Software Foundation; either version 14 * 2 of the License, or (at your option) any later version. 15 */ 16 17 #include <linux/errno.h> 18 #include <linux/sched.h> 19 #include <linux/kernel.h> 20 #include <linux/mm.h> 21 #include <linux/smp.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/slab.h> 26 #include <linux/user.h> 27 #include <linux/elf.h> 28 #include <linux/prctl.h> 29 #include <linux/init_task.h> 30 #include <linux/export.h> 31 #include <linux/kallsyms.h> 32 #include <linux/mqueue.h> 33 #include <linux/hardirq.h> 34 #include <linux/utsname.h> 35 #include <linux/ftrace.h> 36 #include <linux/kernel_stat.h> 37 #include <linux/personality.h> 38 #include <linux/random.h> 39 #include <linux/hw_breakpoint.h> 40 #include <linux/uaccess.h> 41 42 #include <asm/pgtable.h> 43 #include <asm/io.h> 44 #include <asm/processor.h> 45 #include <asm/mmu.h> 46 #include <asm/prom.h> 47 #include <asm/machdep.h> 48 #include <asm/time.h> 49 #include <asm/runlatch.h> 50 #include <asm/syscalls.h> 51 #include <asm/switch_to.h> 52 #include <asm/tm.h> 53 #include <asm/debug.h> 54 #ifdef CONFIG_PPC64 55 #include <asm/firmware.h> 56 #endif 57 #include <asm/code-patching.h> 58 #include <linux/kprobes.h> 59 #include <linux/kdebug.h> 60 61 /* Transactional Memory debug */ 62 #ifdef TM_DEBUG_SW 63 #define TM_DEBUG(x...) printk(KERN_INFO x) 64 #else 65 #define TM_DEBUG(x...) do { } while(0) 66 #endif 67 68 extern unsigned long _get_SP(void); 69 70 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 71 static void check_if_tm_restore_required(struct task_struct *tsk) 72 { 73 /* 74 * If we are saving the current thread's registers, and the 75 * thread is in a transactional state, set the TIF_RESTORE_TM 76 * bit so that we know to restore the registers before 77 * returning to userspace. 78 */ 79 if (tsk == current && tsk->thread.regs && 80 MSR_TM_ACTIVE(tsk->thread.regs->msr) && 81 !test_thread_flag(TIF_RESTORE_TM)) { 82 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; 83 set_thread_flag(TIF_RESTORE_TM); 84 } 85 } 86 #else 87 static inline void check_if_tm_restore_required(struct task_struct *tsk) { } 88 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 89 90 bool strict_msr_control; 91 EXPORT_SYMBOL(strict_msr_control); 92 93 static int __init enable_strict_msr_control(char *str) 94 { 95 strict_msr_control = true; 96 pr_info("Enabling strict facility control\n"); 97 98 return 0; 99 } 100 early_param("ppc_strict_facility_enable", enable_strict_msr_control); 101 102 void msr_check_and_set(unsigned long bits) 103 { 104 unsigned long oldmsr = mfmsr(); 105 unsigned long newmsr; 106 107 newmsr = oldmsr | bits; 108 109 #ifdef CONFIG_VSX 110 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) 111 newmsr |= MSR_VSX; 112 #endif 113 114 if (oldmsr != newmsr) 115 mtmsr_isync(newmsr); 116 } 117 118 void __msr_check_and_clear(unsigned long bits) 119 { 120 unsigned long oldmsr = mfmsr(); 121 unsigned long newmsr; 122 123 newmsr = oldmsr & ~bits; 124 125 #ifdef CONFIG_VSX 126 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) 127 newmsr &= ~MSR_VSX; 128 #endif 129 130 if (oldmsr != newmsr) 131 mtmsr_isync(newmsr); 132 } 133 EXPORT_SYMBOL(__msr_check_and_clear); 134 135 #ifdef CONFIG_PPC_FPU 136 void __giveup_fpu(struct task_struct *tsk) 137 { 138 save_fpu(tsk); 139 tsk->thread.regs->msr &= ~MSR_FP; 140 #ifdef CONFIG_VSX 141 if (cpu_has_feature(CPU_FTR_VSX)) 142 tsk->thread.regs->msr &= ~MSR_VSX; 143 #endif 144 } 145 146 void giveup_fpu(struct task_struct *tsk) 147 { 148 check_if_tm_restore_required(tsk); 149 150 msr_check_and_set(MSR_FP); 151 __giveup_fpu(tsk); 152 msr_check_and_clear(MSR_FP); 153 } 154 EXPORT_SYMBOL(giveup_fpu); 155 156 /* 157 * Make sure the floating-point register state in the 158 * the thread_struct is up to date for task tsk. 159 */ 160 void flush_fp_to_thread(struct task_struct *tsk) 161 { 162 if (tsk->thread.regs) { 163 /* 164 * We need to disable preemption here because if we didn't, 165 * another process could get scheduled after the regs->msr 166 * test but before we have finished saving the FP registers 167 * to the thread_struct. That process could take over the 168 * FPU, and then when we get scheduled again we would store 169 * bogus values for the remaining FP registers. 170 */ 171 preempt_disable(); 172 if (tsk->thread.regs->msr & MSR_FP) { 173 /* 174 * This should only ever be called for current or 175 * for a stopped child process. Since we save away 176 * the FP register state on context switch, 177 * there is something wrong if a stopped child appears 178 * to still have its FP state in the CPU registers. 179 */ 180 BUG_ON(tsk != current); 181 giveup_fpu(tsk); 182 } 183 preempt_enable(); 184 } 185 } 186 EXPORT_SYMBOL_GPL(flush_fp_to_thread); 187 188 void enable_kernel_fp(void) 189 { 190 WARN_ON(preemptible()); 191 192 msr_check_and_set(MSR_FP); 193 194 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { 195 check_if_tm_restore_required(current); 196 __giveup_fpu(current); 197 } 198 } 199 EXPORT_SYMBOL(enable_kernel_fp); 200 201 static int restore_fp(struct task_struct *tsk) { 202 if (tsk->thread.load_fp) { 203 load_fp_state(¤t->thread.fp_state); 204 current->thread.load_fp++; 205 return 1; 206 } 207 return 0; 208 } 209 #else 210 static int restore_fp(struct task_struct *tsk) { return 0; } 211 #endif /* CONFIG_PPC_FPU */ 212 213 #ifdef CONFIG_ALTIVEC 214 #define loadvec(thr) ((thr).load_vec) 215 216 static void __giveup_altivec(struct task_struct *tsk) 217 { 218 save_altivec(tsk); 219 tsk->thread.regs->msr &= ~MSR_VEC; 220 #ifdef CONFIG_VSX 221 if (cpu_has_feature(CPU_FTR_VSX)) 222 tsk->thread.regs->msr &= ~MSR_VSX; 223 #endif 224 } 225 226 void giveup_altivec(struct task_struct *tsk) 227 { 228 check_if_tm_restore_required(tsk); 229 230 msr_check_and_set(MSR_VEC); 231 __giveup_altivec(tsk); 232 msr_check_and_clear(MSR_VEC); 233 } 234 EXPORT_SYMBOL(giveup_altivec); 235 236 void enable_kernel_altivec(void) 237 { 238 WARN_ON(preemptible()); 239 240 msr_check_and_set(MSR_VEC); 241 242 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { 243 check_if_tm_restore_required(current); 244 __giveup_altivec(current); 245 } 246 } 247 EXPORT_SYMBOL(enable_kernel_altivec); 248 249 /* 250 * Make sure the VMX/Altivec register state in the 251 * the thread_struct is up to date for task tsk. 252 */ 253 void flush_altivec_to_thread(struct task_struct *tsk) 254 { 255 if (tsk->thread.regs) { 256 preempt_disable(); 257 if (tsk->thread.regs->msr & MSR_VEC) { 258 BUG_ON(tsk != current); 259 giveup_altivec(tsk); 260 } 261 preempt_enable(); 262 } 263 } 264 EXPORT_SYMBOL_GPL(flush_altivec_to_thread); 265 266 static int restore_altivec(struct task_struct *tsk) 267 { 268 if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) { 269 load_vr_state(&tsk->thread.vr_state); 270 tsk->thread.used_vr = 1; 271 tsk->thread.load_vec++; 272 273 return 1; 274 } 275 return 0; 276 } 277 #else 278 #define loadvec(thr) 0 279 static inline int restore_altivec(struct task_struct *tsk) { return 0; } 280 #endif /* CONFIG_ALTIVEC */ 281 282 #ifdef CONFIG_VSX 283 static void __giveup_vsx(struct task_struct *tsk) 284 { 285 if (tsk->thread.regs->msr & MSR_FP) 286 __giveup_fpu(tsk); 287 if (tsk->thread.regs->msr & MSR_VEC) 288 __giveup_altivec(tsk); 289 tsk->thread.regs->msr &= ~MSR_VSX; 290 } 291 292 static void giveup_vsx(struct task_struct *tsk) 293 { 294 check_if_tm_restore_required(tsk); 295 296 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); 297 __giveup_vsx(tsk); 298 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); 299 } 300 301 static void save_vsx(struct task_struct *tsk) 302 { 303 if (tsk->thread.regs->msr & MSR_FP) 304 save_fpu(tsk); 305 if (tsk->thread.regs->msr & MSR_VEC) 306 save_altivec(tsk); 307 } 308 309 void enable_kernel_vsx(void) 310 { 311 WARN_ON(preemptible()); 312 313 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); 314 315 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) { 316 check_if_tm_restore_required(current); 317 if (current->thread.regs->msr & MSR_FP) 318 __giveup_fpu(current); 319 if (current->thread.regs->msr & MSR_VEC) 320 __giveup_altivec(current); 321 __giveup_vsx(current); 322 } 323 } 324 EXPORT_SYMBOL(enable_kernel_vsx); 325 326 void flush_vsx_to_thread(struct task_struct *tsk) 327 { 328 if (tsk->thread.regs) { 329 preempt_disable(); 330 if (tsk->thread.regs->msr & MSR_VSX) { 331 BUG_ON(tsk != current); 332 giveup_vsx(tsk); 333 } 334 preempt_enable(); 335 } 336 } 337 EXPORT_SYMBOL_GPL(flush_vsx_to_thread); 338 339 static int restore_vsx(struct task_struct *tsk) 340 { 341 if (cpu_has_feature(CPU_FTR_VSX)) { 342 tsk->thread.used_vsr = 1; 343 return 1; 344 } 345 346 return 0; 347 } 348 #else 349 static inline int restore_vsx(struct task_struct *tsk) { return 0; } 350 static inline void save_vsx(struct task_struct *tsk) { } 351 #endif /* CONFIG_VSX */ 352 353 #ifdef CONFIG_SPE 354 void giveup_spe(struct task_struct *tsk) 355 { 356 check_if_tm_restore_required(tsk); 357 358 msr_check_and_set(MSR_SPE); 359 __giveup_spe(tsk); 360 msr_check_and_clear(MSR_SPE); 361 } 362 EXPORT_SYMBOL(giveup_spe); 363 364 void enable_kernel_spe(void) 365 { 366 WARN_ON(preemptible()); 367 368 msr_check_and_set(MSR_SPE); 369 370 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) { 371 check_if_tm_restore_required(current); 372 __giveup_spe(current); 373 } 374 } 375 EXPORT_SYMBOL(enable_kernel_spe); 376 377 void flush_spe_to_thread(struct task_struct *tsk) 378 { 379 if (tsk->thread.regs) { 380 preempt_disable(); 381 if (tsk->thread.regs->msr & MSR_SPE) { 382 BUG_ON(tsk != current); 383 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); 384 giveup_spe(tsk); 385 } 386 preempt_enable(); 387 } 388 } 389 #endif /* CONFIG_SPE */ 390 391 static unsigned long msr_all_available; 392 393 static int __init init_msr_all_available(void) 394 { 395 #ifdef CONFIG_PPC_FPU 396 msr_all_available |= MSR_FP; 397 #endif 398 #ifdef CONFIG_ALTIVEC 399 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 400 msr_all_available |= MSR_VEC; 401 #endif 402 #ifdef CONFIG_VSX 403 if (cpu_has_feature(CPU_FTR_VSX)) 404 msr_all_available |= MSR_VSX; 405 #endif 406 #ifdef CONFIG_SPE 407 if (cpu_has_feature(CPU_FTR_SPE)) 408 msr_all_available |= MSR_SPE; 409 #endif 410 411 return 0; 412 } 413 early_initcall(init_msr_all_available); 414 415 void giveup_all(struct task_struct *tsk) 416 { 417 unsigned long usermsr; 418 419 if (!tsk->thread.regs) 420 return; 421 422 usermsr = tsk->thread.regs->msr; 423 424 if ((usermsr & msr_all_available) == 0) 425 return; 426 427 msr_check_and_set(msr_all_available); 428 429 #ifdef CONFIG_PPC_FPU 430 if (usermsr & MSR_FP) 431 __giveup_fpu(tsk); 432 #endif 433 #ifdef CONFIG_ALTIVEC 434 if (usermsr & MSR_VEC) 435 __giveup_altivec(tsk); 436 #endif 437 #ifdef CONFIG_VSX 438 if (usermsr & MSR_VSX) 439 __giveup_vsx(tsk); 440 #endif 441 #ifdef CONFIG_SPE 442 if (usermsr & MSR_SPE) 443 __giveup_spe(tsk); 444 #endif 445 446 msr_check_and_clear(msr_all_available); 447 } 448 EXPORT_SYMBOL(giveup_all); 449 450 void restore_math(struct pt_regs *regs) 451 { 452 unsigned long msr; 453 454 if (!current->thread.load_fp && !loadvec(current->thread)) 455 return; 456 457 msr = regs->msr; 458 msr_check_and_set(msr_all_available); 459 460 /* 461 * Only reload if the bit is not set in the user MSR, the bit BEING set 462 * indicates that the registers are hot 463 */ 464 if ((!(msr & MSR_FP)) && restore_fp(current)) 465 msr |= MSR_FP | current->thread.fpexc_mode; 466 467 if ((!(msr & MSR_VEC)) && restore_altivec(current)) 468 msr |= MSR_VEC; 469 470 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) && 471 restore_vsx(current)) { 472 msr |= MSR_VSX; 473 } 474 475 msr_check_and_clear(msr_all_available); 476 477 regs->msr = msr; 478 } 479 480 void save_all(struct task_struct *tsk) 481 { 482 unsigned long usermsr; 483 484 if (!tsk->thread.regs) 485 return; 486 487 usermsr = tsk->thread.regs->msr; 488 489 if ((usermsr & msr_all_available) == 0) 490 return; 491 492 msr_check_and_set(msr_all_available); 493 494 /* 495 * Saving the way the register space is in hardware, save_vsx boils 496 * down to a save_fpu() and save_altivec() 497 */ 498 if (usermsr & MSR_VSX) { 499 save_vsx(tsk); 500 } else { 501 if (usermsr & MSR_FP) 502 save_fpu(tsk); 503 504 if (usermsr & MSR_VEC) 505 save_altivec(tsk); 506 } 507 508 if (usermsr & MSR_SPE) 509 __giveup_spe(tsk); 510 511 msr_check_and_clear(msr_all_available); 512 } 513 514 void flush_all_to_thread(struct task_struct *tsk) 515 { 516 if (tsk->thread.regs) { 517 preempt_disable(); 518 BUG_ON(tsk != current); 519 save_all(tsk); 520 521 #ifdef CONFIG_SPE 522 if (tsk->thread.regs->msr & MSR_SPE) 523 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); 524 #endif 525 526 preempt_enable(); 527 } 528 } 529 EXPORT_SYMBOL(flush_all_to_thread); 530 531 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 532 void do_send_trap(struct pt_regs *regs, unsigned long address, 533 unsigned long error_code, int signal_code, int breakpt) 534 { 535 siginfo_t info; 536 537 current->thread.trap_nr = signal_code; 538 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 539 11, SIGSEGV) == NOTIFY_STOP) 540 return; 541 542 /* Deliver the signal to userspace */ 543 info.si_signo = SIGTRAP; 544 info.si_errno = breakpt; /* breakpoint or watchpoint id */ 545 info.si_code = signal_code; 546 info.si_addr = (void __user *)address; 547 force_sig_info(SIGTRAP, &info, current); 548 } 549 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 550 void do_break (struct pt_regs *regs, unsigned long address, 551 unsigned long error_code) 552 { 553 siginfo_t info; 554 555 current->thread.trap_nr = TRAP_HWBKPT; 556 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 557 11, SIGSEGV) == NOTIFY_STOP) 558 return; 559 560 if (debugger_break_match(regs)) 561 return; 562 563 /* Clear the breakpoint */ 564 hw_breakpoint_disable(); 565 566 /* Deliver the signal to userspace */ 567 info.si_signo = SIGTRAP; 568 info.si_errno = 0; 569 info.si_code = TRAP_HWBKPT; 570 info.si_addr = (void __user *)address; 571 force_sig_info(SIGTRAP, &info, current); 572 } 573 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 574 575 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); 576 577 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 578 /* 579 * Set the debug registers back to their default "safe" values. 580 */ 581 static void set_debug_reg_defaults(struct thread_struct *thread) 582 { 583 thread->debug.iac1 = thread->debug.iac2 = 0; 584 #if CONFIG_PPC_ADV_DEBUG_IACS > 2 585 thread->debug.iac3 = thread->debug.iac4 = 0; 586 #endif 587 thread->debug.dac1 = thread->debug.dac2 = 0; 588 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 589 thread->debug.dvc1 = thread->debug.dvc2 = 0; 590 #endif 591 thread->debug.dbcr0 = 0; 592 #ifdef CONFIG_BOOKE 593 /* 594 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) 595 */ 596 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | 597 DBCR1_IAC3US | DBCR1_IAC4US; 598 /* 599 * Force Data Address Compare User/Supervisor bits to be User-only 600 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. 601 */ 602 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; 603 #else 604 thread->debug.dbcr1 = 0; 605 #endif 606 } 607 608 static void prime_debug_regs(struct debug_reg *debug) 609 { 610 /* 611 * We could have inherited MSR_DE from userspace, since 612 * it doesn't get cleared on exception entry. Make sure 613 * MSR_DE is clear before we enable any debug events. 614 */ 615 mtmsr(mfmsr() & ~MSR_DE); 616 617 mtspr(SPRN_IAC1, debug->iac1); 618 mtspr(SPRN_IAC2, debug->iac2); 619 #if CONFIG_PPC_ADV_DEBUG_IACS > 2 620 mtspr(SPRN_IAC3, debug->iac3); 621 mtspr(SPRN_IAC4, debug->iac4); 622 #endif 623 mtspr(SPRN_DAC1, debug->dac1); 624 mtspr(SPRN_DAC2, debug->dac2); 625 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 626 mtspr(SPRN_DVC1, debug->dvc1); 627 mtspr(SPRN_DVC2, debug->dvc2); 628 #endif 629 mtspr(SPRN_DBCR0, debug->dbcr0); 630 mtspr(SPRN_DBCR1, debug->dbcr1); 631 #ifdef CONFIG_BOOKE 632 mtspr(SPRN_DBCR2, debug->dbcr2); 633 #endif 634 } 635 /* 636 * Unless neither the old or new thread are making use of the 637 * debug registers, set the debug registers from the values 638 * stored in the new thread. 639 */ 640 void switch_booke_debug_regs(struct debug_reg *new_debug) 641 { 642 if ((current->thread.debug.dbcr0 & DBCR0_IDM) 643 || (new_debug->dbcr0 & DBCR0_IDM)) 644 prime_debug_regs(new_debug); 645 } 646 EXPORT_SYMBOL_GPL(switch_booke_debug_regs); 647 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 648 #ifndef CONFIG_HAVE_HW_BREAKPOINT 649 static void set_debug_reg_defaults(struct thread_struct *thread) 650 { 651 thread->hw_brk.address = 0; 652 thread->hw_brk.type = 0; 653 set_breakpoint(&thread->hw_brk); 654 } 655 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 656 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 657 658 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 659 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 660 { 661 mtspr(SPRN_DAC1, dabr); 662 #ifdef CONFIG_PPC_47x 663 isync(); 664 #endif 665 return 0; 666 } 667 #elif defined(CONFIG_PPC_BOOK3S) 668 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 669 { 670 mtspr(SPRN_DABR, dabr); 671 if (cpu_has_feature(CPU_FTR_DABRX)) 672 mtspr(SPRN_DABRX, dabrx); 673 return 0; 674 } 675 #else 676 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) 677 { 678 return -EINVAL; 679 } 680 #endif 681 682 static inline int set_dabr(struct arch_hw_breakpoint *brk) 683 { 684 unsigned long dabr, dabrx; 685 686 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR); 687 dabrx = ((brk->type >> 3) & 0x7); 688 689 if (ppc_md.set_dabr) 690 return ppc_md.set_dabr(dabr, dabrx); 691 692 return __set_dabr(dabr, dabrx); 693 } 694 695 static inline int set_dawr(struct arch_hw_breakpoint *brk) 696 { 697 unsigned long dawr, dawrx, mrd; 698 699 dawr = brk->address; 700 701 dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \ 702 << (63 - 58); //* read/write bits */ 703 dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \ 704 << (63 - 59); //* translate */ 705 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \ 706 >> 3; //* PRIM bits */ 707 /* dawr length is stored in field MDR bits 48:53. Matches range in 708 doublewords (64 bits) baised by -1 eg. 0b000000=1DW and 709 0b111111=64DW. 710 brk->len is in bytes. 711 This aligns up to double word size, shifts and does the bias. 712 */ 713 mrd = ((brk->len + 7) >> 3) - 1; 714 dawrx |= (mrd & 0x3f) << (63 - 53); 715 716 if (ppc_md.set_dawr) 717 return ppc_md.set_dawr(dawr, dawrx); 718 mtspr(SPRN_DAWR, dawr); 719 mtspr(SPRN_DAWRX, dawrx); 720 return 0; 721 } 722 723 void __set_breakpoint(struct arch_hw_breakpoint *brk) 724 { 725 memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk)); 726 727 if (cpu_has_feature(CPU_FTR_DAWR)) 728 set_dawr(brk); 729 else 730 set_dabr(brk); 731 } 732 733 void set_breakpoint(struct arch_hw_breakpoint *brk) 734 { 735 preempt_disable(); 736 __set_breakpoint(brk); 737 preempt_enable(); 738 } 739 740 #ifdef CONFIG_PPC64 741 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); 742 #endif 743 744 static inline bool hw_brk_match(struct arch_hw_breakpoint *a, 745 struct arch_hw_breakpoint *b) 746 { 747 if (a->address != b->address) 748 return false; 749 if (a->type != b->type) 750 return false; 751 if (a->len != b->len) 752 return false; 753 return true; 754 } 755 756 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 757 static void tm_reclaim_thread(struct thread_struct *thr, 758 struct thread_info *ti, uint8_t cause) 759 { 760 unsigned long msr_diff = 0; 761 762 /* 763 * If FP/VSX registers have been already saved to the 764 * thread_struct, move them to the transact_fp array. 765 * We clear the TIF_RESTORE_TM bit since after the reclaim 766 * the thread will no longer be transactional. 767 */ 768 if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) { 769 msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr; 770 if (msr_diff & MSR_FP) 771 memcpy(&thr->transact_fp, &thr->fp_state, 772 sizeof(struct thread_fp_state)); 773 if (msr_diff & MSR_VEC) 774 memcpy(&thr->transact_vr, &thr->vr_state, 775 sizeof(struct thread_vr_state)); 776 clear_ti_thread_flag(ti, TIF_RESTORE_TM); 777 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; 778 } 779 780 /* 781 * Use the current MSR TM suspended bit to track if we have 782 * checkpointed state outstanding. 783 * On signal delivery, we'd normally reclaim the checkpointed 784 * state to obtain stack pointer (see:get_tm_stackpointer()). 785 * This will then directly return to userspace without going 786 * through __switch_to(). However, if the stack frame is bad, 787 * we need to exit this thread which calls __switch_to() which 788 * will again attempt to reclaim the already saved tm state. 789 * Hence we need to check that we've not already reclaimed 790 * this state. 791 * We do this using the current MSR, rather tracking it in 792 * some specific thread_struct bit, as it has the additional 793 * benifit of checking for a potential TM bad thing exception. 794 */ 795 if (!MSR_TM_SUSPENDED(mfmsr())) 796 return; 797 798 tm_reclaim(thr, thr->regs->msr, cause); 799 800 /* Having done the reclaim, we now have the checkpointed 801 * FP/VSX values in the registers. These might be valid 802 * even if we have previously called enable_kernel_fp() or 803 * flush_fp_to_thread(), so update thr->regs->msr to 804 * indicate their current validity. 805 */ 806 thr->regs->msr |= msr_diff; 807 } 808 809 void tm_reclaim_current(uint8_t cause) 810 { 811 tm_enable(); 812 tm_reclaim_thread(¤t->thread, current_thread_info(), cause); 813 } 814 815 static inline void tm_reclaim_task(struct task_struct *tsk) 816 { 817 /* We have to work out if we're switching from/to a task that's in the 818 * middle of a transaction. 819 * 820 * In switching we need to maintain a 2nd register state as 821 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the 822 * checkpointed (tbegin) state in ckpt_regs and saves the transactional 823 * (current) FPRs into oldtask->thread.transact_fpr[]. 824 * 825 * We also context switch (save) TFHAR/TEXASR/TFIAR in here. 826 */ 827 struct thread_struct *thr = &tsk->thread; 828 829 if (!thr->regs) 830 return; 831 832 if (!MSR_TM_ACTIVE(thr->regs->msr)) 833 goto out_and_saveregs; 834 835 /* Stash the original thread MSR, as giveup_fpu et al will 836 * modify it. We hold onto it to see whether the task used 837 * FP & vector regs. If the TIF_RESTORE_TM flag is set, 838 * ckpt_regs.msr is already set. 839 */ 840 if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM)) 841 thr->ckpt_regs.msr = thr->regs->msr; 842 843 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " 844 "ccr=%lx, msr=%lx, trap=%lx)\n", 845 tsk->pid, thr->regs->nip, 846 thr->regs->ccr, thr->regs->msr, 847 thr->regs->trap); 848 849 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED); 850 851 TM_DEBUG("--- tm_reclaim on pid %d complete\n", 852 tsk->pid); 853 854 out_and_saveregs: 855 /* Always save the regs here, even if a transaction's not active. 856 * This context-switches a thread's TM info SPRs. We do it here to 857 * be consistent with the restore path (in recheckpoint) which 858 * cannot happen later in _switch(). 859 */ 860 tm_save_sprs(thr); 861 } 862 863 extern void __tm_recheckpoint(struct thread_struct *thread, 864 unsigned long orig_msr); 865 866 void tm_recheckpoint(struct thread_struct *thread, 867 unsigned long orig_msr) 868 { 869 unsigned long flags; 870 871 /* We really can't be interrupted here as the TEXASR registers can't 872 * change and later in the trecheckpoint code, we have a userspace R1. 873 * So let's hard disable over this region. 874 */ 875 local_irq_save(flags); 876 hard_irq_disable(); 877 878 /* The TM SPRs are restored here, so that TEXASR.FS can be set 879 * before the trecheckpoint and no explosion occurs. 880 */ 881 tm_restore_sprs(thread); 882 883 __tm_recheckpoint(thread, orig_msr); 884 885 local_irq_restore(flags); 886 } 887 888 static inline void tm_recheckpoint_new_task(struct task_struct *new) 889 { 890 unsigned long msr; 891 892 if (!cpu_has_feature(CPU_FTR_TM)) 893 return; 894 895 /* Recheckpoint the registers of the thread we're about to switch to. 896 * 897 * If the task was using FP, we non-lazily reload both the original and 898 * the speculative FP register states. This is because the kernel 899 * doesn't see if/when a TM rollback occurs, so if we take an FP 900 * unavoidable later, we are unable to determine which set of FP regs 901 * need to be restored. 902 */ 903 if (!new->thread.regs) 904 return; 905 906 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){ 907 tm_restore_sprs(&new->thread); 908 return; 909 } 910 msr = new->thread.ckpt_regs.msr; 911 /* Recheckpoint to restore original checkpointed register state. */ 912 TM_DEBUG("*** tm_recheckpoint of pid %d " 913 "(new->msr 0x%lx, new->origmsr 0x%lx)\n", 914 new->pid, new->thread.regs->msr, msr); 915 916 /* This loads the checkpointed FP/VEC state, if used */ 917 tm_recheckpoint(&new->thread, msr); 918 919 /* This loads the speculative FP/VEC state, if used */ 920 if (msr & MSR_FP) { 921 do_load_up_transact_fpu(&new->thread); 922 new->thread.regs->msr |= 923 (MSR_FP | new->thread.fpexc_mode); 924 } 925 #ifdef CONFIG_ALTIVEC 926 if (msr & MSR_VEC) { 927 do_load_up_transact_altivec(&new->thread); 928 new->thread.regs->msr |= MSR_VEC; 929 } 930 #endif 931 /* We may as well turn on VSX too since all the state is restored now */ 932 if (msr & MSR_VSX) 933 new->thread.regs->msr |= MSR_VSX; 934 935 TM_DEBUG("*** tm_recheckpoint of pid %d complete " 936 "(kernel msr 0x%lx)\n", 937 new->pid, mfmsr()); 938 } 939 940 static inline void __switch_to_tm(struct task_struct *prev) 941 { 942 if (cpu_has_feature(CPU_FTR_TM)) { 943 tm_enable(); 944 tm_reclaim_task(prev); 945 } 946 } 947 948 /* 949 * This is called if we are on the way out to userspace and the 950 * TIF_RESTORE_TM flag is set. It checks if we need to reload 951 * FP and/or vector state and does so if necessary. 952 * If userspace is inside a transaction (whether active or 953 * suspended) and FP/VMX/VSX instructions have ever been enabled 954 * inside that transaction, then we have to keep them enabled 955 * and keep the FP/VMX/VSX state loaded while ever the transaction 956 * continues. The reason is that if we didn't, and subsequently 957 * got a FP/VMX/VSX unavailable interrupt inside a transaction, 958 * we don't know whether it's the same transaction, and thus we 959 * don't know which of the checkpointed state and the transactional 960 * state to use. 961 */ 962 void restore_tm_state(struct pt_regs *regs) 963 { 964 unsigned long msr_diff; 965 966 clear_thread_flag(TIF_RESTORE_TM); 967 if (!MSR_TM_ACTIVE(regs->msr)) 968 return; 969 970 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; 971 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; 972 973 restore_math(regs); 974 975 regs->msr |= msr_diff; 976 } 977 978 #else 979 #define tm_recheckpoint_new_task(new) 980 #define __switch_to_tm(prev) 981 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 982 983 static inline void save_sprs(struct thread_struct *t) 984 { 985 #ifdef CONFIG_ALTIVEC 986 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 987 t->vrsave = mfspr(SPRN_VRSAVE); 988 #endif 989 #ifdef CONFIG_PPC_BOOK3S_64 990 if (cpu_has_feature(CPU_FTR_DSCR)) 991 t->dscr = mfspr(SPRN_DSCR); 992 993 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 994 t->bescr = mfspr(SPRN_BESCR); 995 t->ebbhr = mfspr(SPRN_EBBHR); 996 t->ebbrr = mfspr(SPRN_EBBRR); 997 998 t->fscr = mfspr(SPRN_FSCR); 999 1000 /* 1001 * Note that the TAR is not available for use in the kernel. 1002 * (To provide this, the TAR should be backed up/restored on 1003 * exception entry/exit instead, and be in pt_regs. FIXME, 1004 * this should be in pt_regs anyway (for debug).) 1005 */ 1006 t->tar = mfspr(SPRN_TAR); 1007 } 1008 #endif 1009 } 1010 1011 static inline void restore_sprs(struct thread_struct *old_thread, 1012 struct thread_struct *new_thread) 1013 { 1014 #ifdef CONFIG_ALTIVEC 1015 if (cpu_has_feature(CPU_FTR_ALTIVEC) && 1016 old_thread->vrsave != new_thread->vrsave) 1017 mtspr(SPRN_VRSAVE, new_thread->vrsave); 1018 #endif 1019 #ifdef CONFIG_PPC_BOOK3S_64 1020 if (cpu_has_feature(CPU_FTR_DSCR)) { 1021 u64 dscr = get_paca()->dscr_default; 1022 u64 fscr = old_thread->fscr & ~FSCR_DSCR; 1023 1024 if (new_thread->dscr_inherit) { 1025 dscr = new_thread->dscr; 1026 fscr |= FSCR_DSCR; 1027 } 1028 1029 if (old_thread->dscr != dscr) 1030 mtspr(SPRN_DSCR, dscr); 1031 1032 if (old_thread->fscr != fscr) 1033 mtspr(SPRN_FSCR, fscr); 1034 } 1035 1036 if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 1037 if (old_thread->bescr != new_thread->bescr) 1038 mtspr(SPRN_BESCR, new_thread->bescr); 1039 if (old_thread->ebbhr != new_thread->ebbhr) 1040 mtspr(SPRN_EBBHR, new_thread->ebbhr); 1041 if (old_thread->ebbrr != new_thread->ebbrr) 1042 mtspr(SPRN_EBBRR, new_thread->ebbrr); 1043 1044 if (old_thread->tar != new_thread->tar) 1045 mtspr(SPRN_TAR, new_thread->tar); 1046 } 1047 #endif 1048 } 1049 1050 struct task_struct *__switch_to(struct task_struct *prev, 1051 struct task_struct *new) 1052 { 1053 struct thread_struct *new_thread, *old_thread; 1054 struct task_struct *last; 1055 #ifdef CONFIG_PPC_BOOK3S_64 1056 struct ppc64_tlb_batch *batch; 1057 #endif 1058 1059 new_thread = &new->thread; 1060 old_thread = ¤t->thread; 1061 1062 WARN_ON(!irqs_disabled()); 1063 1064 #ifdef CONFIG_PPC64 1065 /* 1066 * Collect processor utilization data per process 1067 */ 1068 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 1069 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); 1070 long unsigned start_tb, current_tb; 1071 start_tb = old_thread->start_tb; 1072 cu->current_tb = current_tb = mfspr(SPRN_PURR); 1073 old_thread->accum_tb += (current_tb - start_tb); 1074 new_thread->start_tb = current_tb; 1075 } 1076 #endif /* CONFIG_PPC64 */ 1077 1078 #ifdef CONFIG_PPC_BOOK3S_64 1079 batch = this_cpu_ptr(&ppc64_tlb_batch); 1080 if (batch->active) { 1081 current_thread_info()->local_flags |= _TLF_LAZY_MMU; 1082 if (batch->index) 1083 __flush_tlb_pending(batch); 1084 batch->active = 0; 1085 } 1086 #endif /* CONFIG_PPC_BOOK3S_64 */ 1087 1088 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1089 switch_booke_debug_regs(&new->thread.debug); 1090 #else 1091 /* 1092 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would 1093 * schedule DABR 1094 */ 1095 #ifndef CONFIG_HAVE_HW_BREAKPOINT 1096 if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) 1097 __set_breakpoint(&new->thread.hw_brk); 1098 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 1099 #endif 1100 1101 /* 1102 * We need to save SPRs before treclaim/trecheckpoint as these will 1103 * change a number of them. 1104 */ 1105 save_sprs(&prev->thread); 1106 1107 __switch_to_tm(prev); 1108 1109 /* Save FPU, Altivec, VSX and SPE state */ 1110 giveup_all(prev); 1111 1112 /* 1113 * We can't take a PMU exception inside _switch() since there is a 1114 * window where the kernel stack SLB and the kernel stack are out 1115 * of sync. Hard disable here. 1116 */ 1117 hard_irq_disable(); 1118 1119 tm_recheckpoint_new_task(new); 1120 1121 /* 1122 * Call restore_sprs() before calling _switch(). If we move it after 1123 * _switch() then we miss out on calling it for new tasks. The reason 1124 * for this is we manually create a stack frame for new tasks that 1125 * directly returns through ret_from_fork() or 1126 * ret_from_kernel_thread(). See copy_thread() for details. 1127 */ 1128 restore_sprs(old_thread, new_thread); 1129 1130 last = _switch(old_thread, new_thread); 1131 1132 #ifdef CONFIG_PPC_BOOK3S_64 1133 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 1134 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 1135 batch = this_cpu_ptr(&ppc64_tlb_batch); 1136 batch->active = 1; 1137 } 1138 1139 if (current_thread_info()->task->thread.regs) 1140 restore_math(current_thread_info()->task->thread.regs); 1141 1142 #endif /* CONFIG_PPC_BOOK3S_64 */ 1143 1144 return last; 1145 } 1146 1147 static int instructions_to_print = 16; 1148 1149 static void show_instructions(struct pt_regs *regs) 1150 { 1151 int i; 1152 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * 1153 sizeof(int)); 1154 1155 printk("Instruction dump:"); 1156 1157 for (i = 0; i < instructions_to_print; i++) { 1158 int instr; 1159 1160 if (!(i % 8)) 1161 printk("\n"); 1162 1163 #if !defined(CONFIG_BOOKE) 1164 /* If executing with the IMMU off, adjust pc rather 1165 * than print XXXXXXXX. 1166 */ 1167 if (!(regs->msr & MSR_IR)) 1168 pc = (unsigned long)phys_to_virt(pc); 1169 #endif 1170 1171 if (!__kernel_text_address(pc) || 1172 probe_kernel_address((unsigned int __user *)pc, instr)) { 1173 printk(KERN_CONT "XXXXXXXX "); 1174 } else { 1175 if (regs->nip == pc) 1176 printk(KERN_CONT "<%08x> ", instr); 1177 else 1178 printk(KERN_CONT "%08x ", instr); 1179 } 1180 1181 pc += sizeof(int); 1182 } 1183 1184 printk("\n"); 1185 } 1186 1187 struct regbit { 1188 unsigned long bit; 1189 const char *name; 1190 }; 1191 1192 static struct regbit msr_bits[] = { 1193 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) 1194 {MSR_SF, "SF"}, 1195 {MSR_HV, "HV"}, 1196 #endif 1197 {MSR_VEC, "VEC"}, 1198 {MSR_VSX, "VSX"}, 1199 #ifdef CONFIG_BOOKE 1200 {MSR_CE, "CE"}, 1201 #endif 1202 {MSR_EE, "EE"}, 1203 {MSR_PR, "PR"}, 1204 {MSR_FP, "FP"}, 1205 {MSR_ME, "ME"}, 1206 #ifdef CONFIG_BOOKE 1207 {MSR_DE, "DE"}, 1208 #else 1209 {MSR_SE, "SE"}, 1210 {MSR_BE, "BE"}, 1211 #endif 1212 {MSR_IR, "IR"}, 1213 {MSR_DR, "DR"}, 1214 {MSR_PMM, "PMM"}, 1215 #ifndef CONFIG_BOOKE 1216 {MSR_RI, "RI"}, 1217 {MSR_LE, "LE"}, 1218 #endif 1219 {0, NULL} 1220 }; 1221 1222 static void print_bits(unsigned long val, struct regbit *bits, const char *sep) 1223 { 1224 const char *s = ""; 1225 1226 for (; bits->bit; ++bits) 1227 if (val & bits->bit) { 1228 printk("%s%s", s, bits->name); 1229 s = sep; 1230 } 1231 } 1232 1233 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1234 static struct regbit msr_tm_bits[] = { 1235 {MSR_TS_T, "T"}, 1236 {MSR_TS_S, "S"}, 1237 {MSR_TM, "E"}, 1238 {0, NULL} 1239 }; 1240 1241 static void print_tm_bits(unsigned long val) 1242 { 1243 /* 1244 * This only prints something if at least one of the TM bit is set. 1245 * Inside the TM[], the output means: 1246 * E: Enabled (bit 32) 1247 * S: Suspended (bit 33) 1248 * T: Transactional (bit 34) 1249 */ 1250 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { 1251 printk(",TM["); 1252 print_bits(val, msr_tm_bits, ""); 1253 printk("]"); 1254 } 1255 } 1256 #else 1257 static void print_tm_bits(unsigned long val) {} 1258 #endif 1259 1260 static void print_msr_bits(unsigned long val) 1261 { 1262 printk("<"); 1263 print_bits(val, msr_bits, ","); 1264 print_tm_bits(val); 1265 printk(">"); 1266 } 1267 1268 #ifdef CONFIG_PPC64 1269 #define REG "%016lx" 1270 #define REGS_PER_LINE 4 1271 #define LAST_VOLATILE 13 1272 #else 1273 #define REG "%08lx" 1274 #define REGS_PER_LINE 8 1275 #define LAST_VOLATILE 12 1276 #endif 1277 1278 void show_regs(struct pt_regs * regs) 1279 { 1280 int i, trap; 1281 1282 show_regs_print_info(KERN_DEFAULT); 1283 1284 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 1285 regs->nip, regs->link, regs->ctr); 1286 printk("REGS: %p TRAP: %04lx %s (%s)\n", 1287 regs, regs->trap, print_tainted(), init_utsname()->release); 1288 printk("MSR: "REG" ", regs->msr); 1289 print_msr_bits(regs->msr); 1290 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 1291 trap = TRAP(regs); 1292 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 1293 printk("CFAR: "REG" ", regs->orig_gpr3); 1294 if (trap == 0x200 || trap == 0x300 || trap == 0x600) 1295 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 1296 printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); 1297 #else 1298 printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1299 #endif 1300 #ifdef CONFIG_PPC64 1301 printk("SOFTE: %ld ", regs->softe); 1302 #endif 1303 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1304 if (MSR_TM_ACTIVE(regs->msr)) 1305 printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1306 #endif 1307 1308 for (i = 0; i < 32; i++) { 1309 if ((i % REGS_PER_LINE) == 0) 1310 printk("\nGPR%02d: ", i); 1311 printk(REG " ", regs->gpr[i]); 1312 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 1313 break; 1314 } 1315 printk("\n"); 1316 #ifdef CONFIG_KALLSYMS 1317 /* 1318 * Lookup NIP late so we have the best change of getting the 1319 * above info out without failing 1320 */ 1321 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); 1322 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); 1323 #endif 1324 show_stack(current, (unsigned long *) regs->gpr[1]); 1325 if (!user_mode(regs)) 1326 show_instructions(regs); 1327 } 1328 1329 void exit_thread(void) 1330 { 1331 } 1332 1333 void flush_thread(void) 1334 { 1335 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1336 flush_ptrace_hw_breakpoint(current); 1337 #else /* CONFIG_HAVE_HW_BREAKPOINT */ 1338 set_debug_reg_defaults(¤t->thread); 1339 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 1340 } 1341 1342 void 1343 release_thread(struct task_struct *t) 1344 { 1345 } 1346 1347 /* 1348 * this gets called so that we can store coprocessor state into memory and 1349 * copy the current task into the new thread. 1350 */ 1351 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 1352 { 1353 flush_all_to_thread(src); 1354 /* 1355 * Flush TM state out so we can copy it. __switch_to_tm() does this 1356 * flush but it removes the checkpointed state from the current CPU and 1357 * transitions the CPU out of TM mode. Hence we need to call 1358 * tm_recheckpoint_new_task() (on the same task) to restore the 1359 * checkpointed state back and the TM mode. 1360 */ 1361 __switch_to_tm(src); 1362 tm_recheckpoint_new_task(src); 1363 1364 *dst = *src; 1365 1366 clear_task_ebb(dst); 1367 1368 return 0; 1369 } 1370 1371 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp) 1372 { 1373 #ifdef CONFIG_PPC_STD_MMU_64 1374 unsigned long sp_vsid; 1375 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 1376 1377 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) 1378 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) 1379 << SLB_VSID_SHIFT_1T; 1380 else 1381 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) 1382 << SLB_VSID_SHIFT; 1383 sp_vsid |= SLB_VSID_KERNEL | llp; 1384 p->thread.ksp_vsid = sp_vsid; 1385 #endif 1386 } 1387 1388 /* 1389 * Copy a thread.. 1390 */ 1391 1392 /* 1393 * Copy architecture-specific thread state 1394 */ 1395 int copy_thread(unsigned long clone_flags, unsigned long usp, 1396 unsigned long kthread_arg, struct task_struct *p) 1397 { 1398 struct pt_regs *childregs, *kregs; 1399 extern void ret_from_fork(void); 1400 extern void ret_from_kernel_thread(void); 1401 void (*f)(void); 1402 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; 1403 1404 /* Copy registers */ 1405 sp -= sizeof(struct pt_regs); 1406 childregs = (struct pt_regs *) sp; 1407 if (unlikely(p->flags & PF_KTHREAD)) { 1408 /* kernel thread */ 1409 struct thread_info *ti = (void *)task_stack_page(p); 1410 memset(childregs, 0, sizeof(struct pt_regs)); 1411 childregs->gpr[1] = sp + sizeof(struct pt_regs); 1412 /* function */ 1413 if (usp) 1414 childregs->gpr[14] = ppc_function_entry((void *)usp); 1415 #ifdef CONFIG_PPC64 1416 clear_tsk_thread_flag(p, TIF_32BIT); 1417 childregs->softe = 1; 1418 #endif 1419 childregs->gpr[15] = kthread_arg; 1420 p->thread.regs = NULL; /* no user register state */ 1421 ti->flags |= _TIF_RESTOREALL; 1422 f = ret_from_kernel_thread; 1423 } else { 1424 /* user thread */ 1425 struct pt_regs *regs = current_pt_regs(); 1426 CHECK_FULL_REGS(regs); 1427 *childregs = *regs; 1428 if (usp) 1429 childregs->gpr[1] = usp; 1430 p->thread.regs = childregs; 1431 childregs->gpr[3] = 0; /* Result from fork() */ 1432 if (clone_flags & CLONE_SETTLS) { 1433 #ifdef CONFIG_PPC64 1434 if (!is_32bit_task()) 1435 childregs->gpr[13] = childregs->gpr[6]; 1436 else 1437 #endif 1438 childregs->gpr[2] = childregs->gpr[6]; 1439 } 1440 1441 f = ret_from_fork; 1442 } 1443 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX); 1444 sp -= STACK_FRAME_OVERHEAD; 1445 1446 /* 1447 * The way this works is that at some point in the future 1448 * some task will call _switch to switch to the new task. 1449 * That will pop off the stack frame created below and start 1450 * the new task running at ret_from_fork. The new task will 1451 * do some house keeping and then return from the fork or clone 1452 * system call, using the stack frame created above. 1453 */ 1454 ((unsigned long *)sp)[0] = 0; 1455 sp -= sizeof(struct pt_regs); 1456 kregs = (struct pt_regs *) sp; 1457 sp -= STACK_FRAME_OVERHEAD; 1458 p->thread.ksp = sp; 1459 #ifdef CONFIG_PPC32 1460 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + 1461 _ALIGN_UP(sizeof(struct thread_info), 16); 1462 #endif 1463 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1464 p->thread.ptrace_bps[0] = NULL; 1465 #endif 1466 1467 p->thread.fp_save_area = NULL; 1468 #ifdef CONFIG_ALTIVEC 1469 p->thread.vr_save_area = NULL; 1470 #endif 1471 1472 setup_ksp_vsid(p, sp); 1473 1474 #ifdef CONFIG_PPC64 1475 if (cpu_has_feature(CPU_FTR_DSCR)) { 1476 p->thread.dscr_inherit = current->thread.dscr_inherit; 1477 p->thread.dscr = mfspr(SPRN_DSCR); 1478 } 1479 if (cpu_has_feature(CPU_FTR_HAS_PPR)) 1480 p->thread.ppr = INIT_PPR; 1481 #endif 1482 kregs->nip = ppc_function_entry(f); 1483 return 0; 1484 } 1485 1486 /* 1487 * Set up a thread for executing a new program 1488 */ 1489 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) 1490 { 1491 #ifdef CONFIG_PPC64 1492 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ 1493 #endif 1494 1495 /* 1496 * If we exec out of a kernel thread then thread.regs will not be 1497 * set. Do it now. 1498 */ 1499 if (!current->thread.regs) { 1500 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; 1501 current->thread.regs = regs - 1; 1502 } 1503 1504 memset(regs->gpr, 0, sizeof(regs->gpr)); 1505 regs->ctr = 0; 1506 regs->link = 0; 1507 regs->xer = 0; 1508 regs->ccr = 0; 1509 regs->gpr[1] = sp; 1510 1511 /* 1512 * We have just cleared all the nonvolatile GPRs, so make 1513 * FULL_REGS(regs) return true. This is necessary to allow 1514 * ptrace to examine the thread immediately after exec. 1515 */ 1516 regs->trap &= ~1UL; 1517 1518 #ifdef CONFIG_PPC32 1519 regs->mq = 0; 1520 regs->nip = start; 1521 regs->msr = MSR_USER; 1522 #else 1523 if (!is_32bit_task()) { 1524 unsigned long entry; 1525 1526 if (is_elf2_task()) { 1527 /* Look ma, no function descriptors! */ 1528 entry = start; 1529 1530 /* 1531 * Ulrich says: 1532 * The latest iteration of the ABI requires that when 1533 * calling a function (at its global entry point), 1534 * the caller must ensure r12 holds the entry point 1535 * address (so that the function can quickly 1536 * establish addressability). 1537 */ 1538 regs->gpr[12] = start; 1539 /* Make sure that's restored on entry to userspace. */ 1540 set_thread_flag(TIF_RESTOREALL); 1541 } else { 1542 unsigned long toc; 1543 1544 /* start is a relocated pointer to the function 1545 * descriptor for the elf _start routine. The first 1546 * entry in the function descriptor is the entry 1547 * address of _start and the second entry is the TOC 1548 * value we need to use. 1549 */ 1550 __get_user(entry, (unsigned long __user *)start); 1551 __get_user(toc, (unsigned long __user *)start+1); 1552 1553 /* Check whether the e_entry function descriptor entries 1554 * need to be relocated before we can use them. 1555 */ 1556 if (load_addr != 0) { 1557 entry += load_addr; 1558 toc += load_addr; 1559 } 1560 regs->gpr[2] = toc; 1561 } 1562 regs->nip = entry; 1563 regs->msr = MSR_USER64; 1564 } else { 1565 regs->nip = start; 1566 regs->gpr[2] = 0; 1567 regs->msr = MSR_USER32; 1568 } 1569 #endif 1570 #ifdef CONFIG_VSX 1571 current->thread.used_vsr = 0; 1572 #endif 1573 memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); 1574 current->thread.fp_save_area = NULL; 1575 #ifdef CONFIG_ALTIVEC 1576 memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); 1577 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ 1578 current->thread.vr_save_area = NULL; 1579 current->thread.vrsave = 0; 1580 current->thread.used_vr = 0; 1581 #endif /* CONFIG_ALTIVEC */ 1582 #ifdef CONFIG_SPE 1583 memset(current->thread.evr, 0, sizeof(current->thread.evr)); 1584 current->thread.acc = 0; 1585 current->thread.spefscr = 0; 1586 current->thread.used_spe = 0; 1587 #endif /* CONFIG_SPE */ 1588 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1589 if (cpu_has_feature(CPU_FTR_TM)) 1590 regs->msr |= MSR_TM; 1591 current->thread.tm_tfhar = 0; 1592 current->thread.tm_texasr = 0; 1593 current->thread.tm_tfiar = 0; 1594 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1595 } 1596 EXPORT_SYMBOL(start_thread); 1597 1598 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ 1599 | PR_FP_EXC_RES | PR_FP_EXC_INV) 1600 1601 int set_fpexc_mode(struct task_struct *tsk, unsigned int val) 1602 { 1603 struct pt_regs *regs = tsk->thread.regs; 1604 1605 /* This is a bit hairy. If we are an SPE enabled processor 1606 * (have embedded fp) we store the IEEE exception enable flags in 1607 * fpexc_mode. fpexc_mode is also used for setting FP exception 1608 * mode (asyn, precise, disabled) for 'Classic' FP. */ 1609 if (val & PR_FP_EXC_SW_ENABLE) { 1610 #ifdef CONFIG_SPE 1611 if (cpu_has_feature(CPU_FTR_SPE)) { 1612 /* 1613 * When the sticky exception bits are set 1614 * directly by userspace, it must call prctl 1615 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE 1616 * in the existing prctl settings) or 1617 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in 1618 * the bits being set). <fenv.h> functions 1619 * saving and restoring the whole 1620 * floating-point environment need to do so 1621 * anyway to restore the prctl settings from 1622 * the saved environment. 1623 */ 1624 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); 1625 tsk->thread.fpexc_mode = val & 1626 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); 1627 return 0; 1628 } else { 1629 return -EINVAL; 1630 } 1631 #else 1632 return -EINVAL; 1633 #endif 1634 } 1635 1636 /* on a CONFIG_SPE this does not hurt us. The bits that 1637 * __pack_fe01 use do not overlap with bits used for 1638 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits 1639 * on CONFIG_SPE implementations are reserved so writing to 1640 * them does not change anything */ 1641 if (val > PR_FP_EXC_PRECISE) 1642 return -EINVAL; 1643 tsk->thread.fpexc_mode = __pack_fe01(val); 1644 if (regs != NULL && (regs->msr & MSR_FP) != 0) 1645 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) 1646 | tsk->thread.fpexc_mode; 1647 return 0; 1648 } 1649 1650 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) 1651 { 1652 unsigned int val; 1653 1654 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) 1655 #ifdef CONFIG_SPE 1656 if (cpu_has_feature(CPU_FTR_SPE)) { 1657 /* 1658 * When the sticky exception bits are set 1659 * directly by userspace, it must call prctl 1660 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE 1661 * in the existing prctl settings) or 1662 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in 1663 * the bits being set). <fenv.h> functions 1664 * saving and restoring the whole 1665 * floating-point environment need to do so 1666 * anyway to restore the prctl settings from 1667 * the saved environment. 1668 */ 1669 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); 1670 val = tsk->thread.fpexc_mode; 1671 } else 1672 return -EINVAL; 1673 #else 1674 return -EINVAL; 1675 #endif 1676 else 1677 val = __unpack_fe01(tsk->thread.fpexc_mode); 1678 return put_user(val, (unsigned int __user *) adr); 1679 } 1680 1681 int set_endian(struct task_struct *tsk, unsigned int val) 1682 { 1683 struct pt_regs *regs = tsk->thread.regs; 1684 1685 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || 1686 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) 1687 return -EINVAL; 1688 1689 if (regs == NULL) 1690 return -EINVAL; 1691 1692 if (val == PR_ENDIAN_BIG) 1693 regs->msr &= ~MSR_LE; 1694 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) 1695 regs->msr |= MSR_LE; 1696 else 1697 return -EINVAL; 1698 1699 return 0; 1700 } 1701 1702 int get_endian(struct task_struct *tsk, unsigned long adr) 1703 { 1704 struct pt_regs *regs = tsk->thread.regs; 1705 unsigned int val; 1706 1707 if (!cpu_has_feature(CPU_FTR_PPC_LE) && 1708 !cpu_has_feature(CPU_FTR_REAL_LE)) 1709 return -EINVAL; 1710 1711 if (regs == NULL) 1712 return -EINVAL; 1713 1714 if (regs->msr & MSR_LE) { 1715 if (cpu_has_feature(CPU_FTR_REAL_LE)) 1716 val = PR_ENDIAN_LITTLE; 1717 else 1718 val = PR_ENDIAN_PPC_LITTLE; 1719 } else 1720 val = PR_ENDIAN_BIG; 1721 1722 return put_user(val, (unsigned int __user *)adr); 1723 } 1724 1725 int set_unalign_ctl(struct task_struct *tsk, unsigned int val) 1726 { 1727 tsk->thread.align_ctl = val; 1728 return 0; 1729 } 1730 1731 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) 1732 { 1733 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); 1734 } 1735 1736 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, 1737 unsigned long nbytes) 1738 { 1739 unsigned long stack_page; 1740 unsigned long cpu = task_cpu(p); 1741 1742 /* 1743 * Avoid crashing if the stack has overflowed and corrupted 1744 * task_cpu(p), which is in the thread_info struct. 1745 */ 1746 if (cpu < NR_CPUS && cpu_possible(cpu)) { 1747 stack_page = (unsigned long) hardirq_ctx[cpu]; 1748 if (sp >= stack_page + sizeof(struct thread_struct) 1749 && sp <= stack_page + THREAD_SIZE - nbytes) 1750 return 1; 1751 1752 stack_page = (unsigned long) softirq_ctx[cpu]; 1753 if (sp >= stack_page + sizeof(struct thread_struct) 1754 && sp <= stack_page + THREAD_SIZE - nbytes) 1755 return 1; 1756 } 1757 return 0; 1758 } 1759 1760 int validate_sp(unsigned long sp, struct task_struct *p, 1761 unsigned long nbytes) 1762 { 1763 unsigned long stack_page = (unsigned long)task_stack_page(p); 1764 1765 if (sp >= stack_page + sizeof(struct thread_struct) 1766 && sp <= stack_page + THREAD_SIZE - nbytes) 1767 return 1; 1768 1769 return valid_irq_stack(sp, p, nbytes); 1770 } 1771 1772 EXPORT_SYMBOL(validate_sp); 1773 1774 unsigned long get_wchan(struct task_struct *p) 1775 { 1776 unsigned long ip, sp; 1777 int count = 0; 1778 1779 if (!p || p == current || p->state == TASK_RUNNING) 1780 return 0; 1781 1782 sp = p->thread.ksp; 1783 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) 1784 return 0; 1785 1786 do { 1787 sp = *(unsigned long *)sp; 1788 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) 1789 return 0; 1790 if (count > 0) { 1791 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; 1792 if (!in_sched_functions(ip)) 1793 return ip; 1794 } 1795 } while (count++ < 16); 1796 return 0; 1797 } 1798 1799 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; 1800 1801 void show_stack(struct task_struct *tsk, unsigned long *stack) 1802 { 1803 unsigned long sp, ip, lr, newsp; 1804 int count = 0; 1805 int firstframe = 1; 1806 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1807 int curr_frame = current->curr_ret_stack; 1808 extern void return_to_handler(void); 1809 unsigned long rth = (unsigned long)return_to_handler; 1810 #endif 1811 1812 sp = (unsigned long) stack; 1813 if (tsk == NULL) 1814 tsk = current; 1815 if (sp == 0) { 1816 if (tsk == current) 1817 sp = current_stack_pointer(); 1818 else 1819 sp = tsk->thread.ksp; 1820 } 1821 1822 lr = 0; 1823 printk("Call Trace:\n"); 1824 do { 1825 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) 1826 return; 1827 1828 stack = (unsigned long *) sp; 1829 newsp = stack[0]; 1830 ip = stack[STACK_FRAME_LR_SAVE]; 1831 if (!firstframe || ip != lr) { 1832 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); 1833 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1834 if ((ip == rth) && curr_frame >= 0) { 1835 printk(" (%pS)", 1836 (void *)current->ret_stack[curr_frame].ret); 1837 curr_frame--; 1838 } 1839 #endif 1840 if (firstframe) 1841 printk(" (unreliable)"); 1842 printk("\n"); 1843 } 1844 firstframe = 0; 1845 1846 /* 1847 * See if this is an exception frame. 1848 * We look for the "regshere" marker in the current frame. 1849 */ 1850 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) 1851 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { 1852 struct pt_regs *regs = (struct pt_regs *) 1853 (sp + STACK_FRAME_OVERHEAD); 1854 lr = regs->link; 1855 printk("--- interrupt: %lx at %pS\n LR = %pS\n", 1856 regs->trap, (void *)regs->nip, (void *)lr); 1857 firstframe = 1; 1858 } 1859 1860 sp = newsp; 1861 } while (count++ < kstack_depth_to_print); 1862 } 1863 1864 #ifdef CONFIG_PPC64 1865 /* Called with hard IRQs off */ 1866 void notrace __ppc64_runlatch_on(void) 1867 { 1868 struct thread_info *ti = current_thread_info(); 1869 unsigned long ctrl; 1870 1871 ctrl = mfspr(SPRN_CTRLF); 1872 ctrl |= CTRL_RUNLATCH; 1873 mtspr(SPRN_CTRLT, ctrl); 1874 1875 ti->local_flags |= _TLF_RUNLATCH; 1876 } 1877 1878 /* Called with hard IRQs off */ 1879 void notrace __ppc64_runlatch_off(void) 1880 { 1881 struct thread_info *ti = current_thread_info(); 1882 unsigned long ctrl; 1883 1884 ti->local_flags &= ~_TLF_RUNLATCH; 1885 1886 ctrl = mfspr(SPRN_CTRLF); 1887 ctrl &= ~CTRL_RUNLATCH; 1888 mtspr(SPRN_CTRLT, ctrl); 1889 } 1890 #endif /* CONFIG_PPC64 */ 1891 1892 unsigned long arch_align_stack(unsigned long sp) 1893 { 1894 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 1895 sp -= get_random_int() & ~PAGE_MASK; 1896 return sp & ~0xf; 1897 } 1898 1899 static inline unsigned long brk_rnd(void) 1900 { 1901 unsigned long rnd = 0; 1902 1903 /* 8MB for 32bit, 1GB for 64bit */ 1904 if (is_32bit_task()) 1905 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT))); 1906 else 1907 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT))); 1908 1909 return rnd << PAGE_SHIFT; 1910 } 1911 1912 unsigned long arch_randomize_brk(struct mm_struct *mm) 1913 { 1914 unsigned long base = mm->brk; 1915 unsigned long ret; 1916 1917 #ifdef CONFIG_PPC_STD_MMU_64 1918 /* 1919 * If we are using 1TB segments and we are allowed to randomise 1920 * the heap, we can put it above 1TB so it is backed by a 1TB 1921 * segment. Otherwise the heap will be in the bottom 1TB 1922 * which always uses 256MB segments and this may result in a 1923 * performance penalty. 1924 */ 1925 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) 1926 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); 1927 #endif 1928 1929 ret = PAGE_ALIGN(base + brk_rnd()); 1930 1931 if (ret < mm->brk) 1932 return mm->brk; 1933 1934 return ret; 1935 } 1936 1937