xref: /openbmc/linux/arch/powerpc/kernel/process.c (revision 37be287c)
1 /*
2  *  Derived from "arch/i386/kernel/process.c"
3  *    Copyright (C) 1995  Linus Torvalds
4  *
5  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6  *  Paul Mackerras (paulus@cs.anu.edu.au)
7  *
8  *  PowerPC version
9  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License
13  *  as published by the Free Software Foundation; either version
14  *  2 of the License, or (at your option) any later version.
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/elf.h>
28 #include <linux/prctl.h>
29 #include <linux/init_task.h>
30 #include <linux/export.h>
31 #include <linux/kallsyms.h>
32 #include <linux/mqueue.h>
33 #include <linux/hardirq.h>
34 #include <linux/utsname.h>
35 #include <linux/ftrace.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/personality.h>
38 #include <linux/random.h>
39 #include <linux/hw_breakpoint.h>
40 
41 #include <asm/pgtable.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/processor.h>
45 #include <asm/mmu.h>
46 #include <asm/prom.h>
47 #include <asm/machdep.h>
48 #include <asm/time.h>
49 #include <asm/runlatch.h>
50 #include <asm/syscalls.h>
51 #include <asm/switch_to.h>
52 #include <asm/tm.h>
53 #include <asm/debug.h>
54 #ifdef CONFIG_PPC64
55 #include <asm/firmware.h>
56 #endif
57 #include <linux/kprobes.h>
58 #include <linux/kdebug.h>
59 
60 /* Transactional Memory debug */
61 #ifdef TM_DEBUG_SW
62 #define TM_DEBUG(x...) printk(KERN_INFO x)
63 #else
64 #define TM_DEBUG(x...) do { } while(0)
65 #endif
66 
67 extern unsigned long _get_SP(void);
68 
69 #ifndef CONFIG_SMP
70 struct task_struct *last_task_used_math = NULL;
71 struct task_struct *last_task_used_altivec = NULL;
72 struct task_struct *last_task_used_vsx = NULL;
73 struct task_struct *last_task_used_spe = NULL;
74 #endif
75 
76 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
77 void giveup_fpu_maybe_transactional(struct task_struct *tsk)
78 {
79 	/*
80 	 * If we are saving the current thread's registers, and the
81 	 * thread is in a transactional state, set the TIF_RESTORE_TM
82 	 * bit so that we know to restore the registers before
83 	 * returning to userspace.
84 	 */
85 	if (tsk == current && tsk->thread.regs &&
86 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
87 	    !test_thread_flag(TIF_RESTORE_TM)) {
88 		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
89 		set_thread_flag(TIF_RESTORE_TM);
90 	}
91 
92 	giveup_fpu(tsk);
93 }
94 
95 void giveup_altivec_maybe_transactional(struct task_struct *tsk)
96 {
97 	/*
98 	 * If we are saving the current thread's registers, and the
99 	 * thread is in a transactional state, set the TIF_RESTORE_TM
100 	 * bit so that we know to restore the registers before
101 	 * returning to userspace.
102 	 */
103 	if (tsk == current && tsk->thread.regs &&
104 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
105 	    !test_thread_flag(TIF_RESTORE_TM)) {
106 		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
107 		set_thread_flag(TIF_RESTORE_TM);
108 	}
109 
110 	giveup_altivec(tsk);
111 }
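
/*
 * The TIF_RESTORE_TM flag set by the two helpers above is consumed by
 * restore_tm_state(), further down in this file, on the exit path back
 * to userspace.
 */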
112 
113 #else
114 #define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
115 #define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
116 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
117 
118 #ifdef CONFIG_PPC_FPU
119 /*
120  * Make sure the floating-point register state in the
121  * thread_struct is up to date for task tsk.
122  */
123 void flush_fp_to_thread(struct task_struct *tsk)
124 {
125 	if (tsk->thread.regs) {
126 		/*
127 		 * We need to disable preemption here because if we didn't,
128 		 * another process could get scheduled after the regs->msr
129 		 * test but before we have finished saving the FP registers
130 		 * to the thread_struct.  That process could take over the
131 		 * FPU, and then when we get scheduled again we would store
132 		 * bogus values for the remaining FP registers.
133 		 */
134 		preempt_disable();
135 		if (tsk->thread.regs->msr & MSR_FP) {
136 #ifdef CONFIG_SMP
137 			/*
138 			 * This should only ever be called for current or
139 			 * for a stopped child process.  Since we save away
140 			 * the FP register state on context switch on SMP,
141 			 * there is something wrong if a stopped child appears
142 			 * to still have its FP state in the CPU registers.
143 			 */
144 			BUG_ON(tsk != current);
145 #endif
146 			giveup_fpu_maybe_transactional(tsk);
147 		}
148 		preempt_enable();
149 	}
150 }
151 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
152 #endif /* CONFIG_PPC_FPU */
153 
154 void enable_kernel_fp(void)
155 {
156 	WARN_ON(preemptible());
157 
158 #ifdef CONFIG_SMP
159 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
160 		giveup_fpu_maybe_transactional(current);
161 	else
162 		giveup_fpu(NULL);	/* just enables FP for kernel */
163 #else
164 	giveup_fpu_maybe_transactional(last_task_used_math);
165 #endif /* CONFIG_SMP */
166 }
167 EXPORT_SYMBOL(enable_kernel_fp);
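
/*
 * Illustrative usage sketch only (hypothetical, not called anywhere): a
 * kernel-mode user of the FPU must keep preemption disabled around both
 * enable_kernel_fp() and the FP work itself, as the WARN_ON(preemptible())
 * above demands.
 */
#if 0
static void example_kernel_fp_user(void)
{
	preempt_disable();
	enable_kernel_fp();
	/* ... use floating-point registers here ... */
	preempt_enable();
}
#endif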
168 
169 #ifdef CONFIG_ALTIVEC
170 void enable_kernel_altivec(void)
171 {
172 	WARN_ON(preemptible());
173 
174 #ifdef CONFIG_SMP
175 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
176 		giveup_altivec_maybe_transactional(current);
177 	else
178 		giveup_altivec_notask();
179 #else
180 	giveup_altivec_maybe_transactional(last_task_used_altivec);
181 #endif /* CONFIG_SMP */
182 }
183 EXPORT_SYMBOL(enable_kernel_altivec);
184 
185 /*
186  * Make sure the VMX/Altivec register state in the
187  * thread_struct is up to date for task tsk.
188  */
189 void flush_altivec_to_thread(struct task_struct *tsk)
190 {
191 	if (tsk->thread.regs) {
192 		preempt_disable();
193 		if (tsk->thread.regs->msr & MSR_VEC) {
194 #ifdef CONFIG_SMP
195 			BUG_ON(tsk != current);
196 #endif
197 			giveup_altivec_maybe_transactional(tsk);
198 		}
199 		preempt_enable();
200 	}
201 }
202 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
203 #endif /* CONFIG_ALTIVEC */
204 
205 #ifdef CONFIG_VSX
206 #if 0
207 /* not currently used, but some crazy RAID module might want to later */
208 void enable_kernel_vsx(void)
209 {
210 	WARN_ON(preemptible());
211 
212 #ifdef CONFIG_SMP
213 	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
214 		giveup_vsx(current);
215 	else
216 		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
217 #else
218 	giveup_vsx(last_task_used_vsx);
219 #endif /* CONFIG_SMP */
220 }
221 EXPORT_SYMBOL(enable_kernel_vsx);
222 #endif
223 
224 void giveup_vsx(struct task_struct *tsk)
225 {
226 	giveup_fpu_maybe_transactional(tsk);
227 	giveup_altivec_maybe_transactional(tsk);
228 	__giveup_vsx(tsk);
229 }
230 
231 void flush_vsx_to_thread(struct task_struct *tsk)
232 {
233 	if (tsk->thread.regs) {
234 		preempt_disable();
235 		if (tsk->thread.regs->msr & MSR_VSX) {
236 #ifdef CONFIG_SMP
237 			BUG_ON(tsk != current);
238 #endif
239 			giveup_vsx(tsk);
240 		}
241 		preempt_enable();
242 	}
243 }
244 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
245 #endif /* CONFIG_VSX */
246 
247 #ifdef CONFIG_SPE
248 
249 void enable_kernel_spe(void)
250 {
251 	WARN_ON(preemptible());
252 
253 #ifdef CONFIG_SMP
254 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
255 		giveup_spe(current);
256 	else
257 		giveup_spe(NULL);	/* just enable SPE for kernel - force */
258 #else
259 	giveup_spe(last_task_used_spe);
260 #endif /* CONFIG_SMP */
261 }
262 EXPORT_SYMBOL(enable_kernel_spe);
263 
264 void flush_spe_to_thread(struct task_struct *tsk)
265 {
266 	if (tsk->thread.regs) {
267 		preempt_disable();
268 		if (tsk->thread.regs->msr & MSR_SPE) {
269 #ifdef CONFIG_SMP
270 			BUG_ON(tsk != current);
271 #endif
272 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
273 			giveup_spe(tsk);
274 		}
275 		preempt_enable();
276 	}
277 }
278 #endif /* CONFIG_SPE */
279 
280 #ifndef CONFIG_SMP
281 /*
282  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
283  * and the current task has some state, discard it.
284  */
285 void discard_lazy_cpu_state(void)
286 {
287 	preempt_disable();
288 	if (last_task_used_math == current)
289 		last_task_used_math = NULL;
290 #ifdef CONFIG_ALTIVEC
291 	if (last_task_used_altivec == current)
292 		last_task_used_altivec = NULL;
293 #endif /* CONFIG_ALTIVEC */
294 #ifdef CONFIG_VSX
295 	if (last_task_used_vsx == current)
296 		last_task_used_vsx = NULL;
297 #endif /* CONFIG_VSX */
298 #ifdef CONFIG_SPE
299 	if (last_task_used_spe == current)
300 		last_task_used_spe = NULL;
301 #endif
302 	preempt_enable();
303 }
304 #endif /* CONFIG_SMP */
305 
306 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
307 void do_send_trap(struct pt_regs *regs, unsigned long address,
308 		  unsigned long error_code, int signal_code, int breakpt)
309 {
310 	siginfo_t info;
311 
312 	current->thread.trap_nr = signal_code;
313 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
314 			11, SIGSEGV) == NOTIFY_STOP)
315 		return;
316 
317 	/* Deliver the signal to userspace */
318 	info.si_signo = SIGTRAP;
319 	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
320 	info.si_code = signal_code;
321 	info.si_addr = (void __user *)address;
322 	force_sig_info(SIGTRAP, &info, current);
323 }
324 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
325 void do_break (struct pt_regs *regs, unsigned long address,
326 		    unsigned long error_code)
327 {
328 	siginfo_t info;
329 
330 	current->thread.trap_nr = TRAP_HWBKPT;
331 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
332 			11, SIGSEGV) == NOTIFY_STOP)
333 		return;
334 
335 	if (debugger_break_match(regs))
336 		return;
337 
338 	/* Clear the breakpoint */
339 	hw_breakpoint_disable();
340 
341 	/* Deliver the signal to userspace */
342 	info.si_signo = SIGTRAP;
343 	info.si_errno = 0;
344 	info.si_code = TRAP_HWBKPT;
345 	info.si_addr = (void __user *)address;
346 	force_sig_info(SIGTRAP, &info, current);
347 }
348 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
349 
350 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
351 
352 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
353 /*
354  * Set the debug registers back to their default "safe" values.
355  */
356 static void set_debug_reg_defaults(struct thread_struct *thread)
357 {
358 	thread->debug.iac1 = thread->debug.iac2 = 0;
359 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
360 	thread->debug.iac3 = thread->debug.iac4 = 0;
361 #endif
362 	thread->debug.dac1 = thread->debug.dac2 = 0;
363 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
364 	thread->debug.dvc1 = thread->debug.dvc2 = 0;
365 #endif
366 	thread->debug.dbcr0 = 0;
367 #ifdef CONFIG_BOOKE
368 	/*
369 	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
370 	 */
371 	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
372 			DBCR1_IAC3US | DBCR1_IAC4US;
373 	/*
374 	 * Force Data Address Compare User/Supervisor bits to be User-only
375 	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
376 	 */
377 	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
378 #else
379 	thread->debug.dbcr1 = 0;
380 #endif
381 }
382 
383 static void prime_debug_regs(struct debug_reg *debug)
384 {
385 	/*
386 	 * We could have inherited MSR_DE from userspace, since
387 	 * it doesn't get cleared on exception entry.  Make sure
388 	 * MSR_DE is clear before we enable any debug events.
389 	 */
390 	mtmsr(mfmsr() & ~MSR_DE);
391 
392 	mtspr(SPRN_IAC1, debug->iac1);
393 	mtspr(SPRN_IAC2, debug->iac2);
394 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
395 	mtspr(SPRN_IAC3, debug->iac3);
396 	mtspr(SPRN_IAC4, debug->iac4);
397 #endif
398 	mtspr(SPRN_DAC1, debug->dac1);
399 	mtspr(SPRN_DAC2, debug->dac2);
400 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
401 	mtspr(SPRN_DVC1, debug->dvc1);
402 	mtspr(SPRN_DVC2, debug->dvc2);
403 #endif
404 	mtspr(SPRN_DBCR0, debug->dbcr0);
405 	mtspr(SPRN_DBCR1, debug->dbcr1);
406 #ifdef CONFIG_BOOKE
407 	mtspr(SPRN_DBCR2, debug->dbcr2);
408 #endif
409 }
410 /*
411  * If either the old or the new thread is making use of the debug
412  * registers, load the debug registers with the values stored in
413  * the new thread.
414  */
415 void switch_booke_debug_regs(struct debug_reg *new_debug)
416 {
417 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
418 		|| (new_debug->dbcr0 & DBCR0_IDM))
419 			prime_debug_regs(new_debug);
420 }
421 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
422 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
423 #ifndef CONFIG_HAVE_HW_BREAKPOINT
424 static void set_debug_reg_defaults(struct thread_struct *thread)
425 {
426 	thread->hw_brk.address = 0;
427 	thread->hw_brk.type = 0;
428 	set_breakpoint(&thread->hw_brk);
429 }
430 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
431 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
432 
433 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
434 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
435 {
436 	mtspr(SPRN_DAC1, dabr);
437 #ifdef CONFIG_PPC_47x
438 	isync();
439 #endif
440 	return 0;
441 }
442 #elif defined(CONFIG_PPC_BOOK3S)
443 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
444 {
445 	mtspr(SPRN_DABR, dabr);
446 	if (cpu_has_feature(CPU_FTR_DABRX))
447 		mtspr(SPRN_DABRX, dabrx);
448 	return 0;
449 }
450 #else
451 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
452 {
453 	return -EINVAL;
454 }
455 #endif
456 
457 static inline int set_dabr(struct arch_hw_breakpoint *brk)
458 {
459 	unsigned long dabr, dabrx;
460 
461 	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
462 	dabrx = ((brk->type >> 3) & 0x7);
463 
464 	if (ppc_md.set_dabr)
465 		return ppc_md.set_dabr(dabr, dabrx);
466 
467 	return __set_dabr(dabr, dabrx);
468 }
469 
470 static inline int set_dawr(struct arch_hw_breakpoint *brk)
471 {
472 	unsigned long dawr, dawrx, mrd;
473 
474 	dawr = brk->address;
475 
476 	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
477 		                   << (63 - 58); /* read/write bits */
478 	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
479 		                   << (63 - 59); /* translate */
480 	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
481 		                   >> 3; /* PRIV bits */
482 	/* The DAWR length is stored in the DAWRX field MRD, bits 48:53.  It
483 	   encodes the match range in doublewords (64 bits), biased by -1,
484 	   e.g. 0b000000 = 1DW and 0b111111 = 64DW.
485 	   brk->len is in bytes.
486 	   This aligns up to double word size, shifts and does the bias.
487 	*/
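	/*
	 * Worked example: brk->len = 1 byte rounds up to one doubleword,
	 * giving mrd = ((1 + 7) >> 3) - 1 = 0 (0b000000); brk->len = 512
	 * bytes gives mrd = ((512 + 7) >> 3) - 1 = 63 (0b111111), i.e. a
	 * 64 doubleword match range.
	 */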
488 	mrd = ((brk->len + 7) >> 3) - 1;
489 	dawrx |= (mrd & 0x3f) << (63 - 53);
490 
491 	if (ppc_md.set_dawr)
492 		return ppc_md.set_dawr(dawr, dawrx);
493 	mtspr(SPRN_DAWR, dawr);
494 	mtspr(SPRN_DAWRX, dawrx);
495 	return 0;
496 }
497 
498 int set_breakpoint(struct arch_hw_breakpoint *brk)
499 {
500 	__get_cpu_var(current_brk) = *brk;
501 
502 	if (cpu_has_feature(CPU_FTR_DAWR))
503 		return set_dawr(brk);
504 
505 	return set_dabr(brk);
506 }
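
/*
 * set_breakpoint() programs the breakpoint on the current CPU only and
 * caches it in current_brk, which lets __switch_to() skip reprogramming
 * the hardware when the incoming thread wants the same breakpoint (see
 * hw_brk_match() below).
 */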
507 
508 #ifdef CONFIG_PPC64
509 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
510 #endif
511 
512 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
513 			      struct arch_hw_breakpoint *b)
514 {
515 	if (a->address != b->address)
516 		return false;
517 	if (a->type != b->type)
518 		return false;
519 	if (a->len != b->len)
520 		return false;
521 	return true;
522 }
523 
524 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
525 static void tm_reclaim_thread(struct thread_struct *thr,
526 			      struct thread_info *ti, uint8_t cause)
527 {
528 	unsigned long msr_diff = 0;
529 
530 	/*
531 	 * If the FP/VSX registers have already been saved to the
532 	 * thread_struct, move them to the transact_fp array.
533 	 * We clear the TIF_RESTORE_TM bit since after the reclaim
534 	 * the thread will no longer be transactional.
535 	 */
536 	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
537 		msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
538 		if (msr_diff & MSR_FP)
539 			memcpy(&thr->transact_fp, &thr->fp_state,
540 			       sizeof(struct thread_fp_state));
541 		if (msr_diff & MSR_VEC)
542 			memcpy(&thr->transact_vr, &thr->vr_state,
543 			       sizeof(struct thread_vr_state));
544 		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
545 		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
546 	}
547 
548 	tm_reclaim(thr, thr->regs->msr, cause);
549 
550 	/* Having done the reclaim, we now have the checkpointed
551 	 * FP/VSX values in the registers.  These might be valid
552 	 * even if we have previously called enable_kernel_fp() or
553 	 * flush_fp_to_thread(), so update thr->regs->msr to
554 	 * indicate their current validity.
555 	 */
556 	thr->regs->msr |= msr_diff;
557 }
558 
559 void tm_reclaim_current(uint8_t cause)
560 {
561 	tm_enable();
562 	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
563 }
564 
565 static inline void tm_reclaim_task(struct task_struct *tsk)
566 {
567 	/* We have to work out if we're switching from/to a task that's in the
568 	 * middle of a transaction.
569 	 *
570 	 * In switching we need to maintain a 2nd register state as
571 	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
572 	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
573 	 * (current) FPRs into oldtask->thread.transact_fpr[].
574 	 *
575 	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
576 	 */
577 	struct thread_struct *thr = &tsk->thread;
578 
579 	if (!thr->regs)
580 		return;
581 
582 	if (!MSR_TM_ACTIVE(thr->regs->msr))
583 		goto out_and_saveregs;
584 
585 	/* Stash the original thread MSR, as giveup_fpu et al will
586 	 * modify it.  We hold onto it to see whether the task used
587 	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
588 	 * tm_orig_msr is already set.
589 	 */
590 	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
591 		thr->tm_orig_msr = thr->regs->msr;
592 
593 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
594 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
595 		 tsk->pid, thr->regs->nip,
596 		 thr->regs->ccr, thr->regs->msr,
597 		 thr->regs->trap);
598 
599 	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
600 
601 	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
602 		 tsk->pid);
603 
604 out_and_saveregs:
605 	/* Always save the regs here, even if a transaction's not active.
606 	 * This context-switches a thread's TM info SPRs.  We do it here to
607 	 * be consistent with the restore path (in recheckpoint) which
608 	 * cannot happen later in _switch().
609 	 */
610 	tm_save_sprs(thr);
611 }
612 
613 static inline void tm_recheckpoint_new_task(struct task_struct *new)
614 {
615 	unsigned long msr;
616 
617 	if (!cpu_has_feature(CPU_FTR_TM))
618 		return;
619 
620 	/* Recheckpoint the registers of the thread we're about to switch to.
621 	 *
622 	 * If the task was using FP, we non-lazily reload both the original and
623 	 * the speculative FP register states.  This is because the kernel
624 	 * doesn't see if/when a TM rollback occurs, so if we take an FP
625  * unavailable exception later, we are unable to determine which set of
626  * FP regs needs to be restored.
627 	 */
628 	if (!new->thread.regs)
629 		return;
630 
631 	/* The TM SPRs are restored here, so that TEXASR.FS can be set
632 	 * before the trecheckpoint and no explosion occurs.
633 	 */
634 	tm_restore_sprs(&new->thread);
635 
636 	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
637 		return;
638 	msr = new->thread.tm_orig_msr;
639 	/* Recheckpoint to restore original checkpointed register state. */
640 	TM_DEBUG("*** tm_recheckpoint of pid %d "
641 		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
642 		 new->pid, new->thread.regs->msr, msr);
643 
644 	/* This loads the checkpointed FP/VEC state, if used */
645 	tm_recheckpoint(&new->thread, msr);
646 
647 	/* This loads the speculative FP/VEC state, if used */
648 	if (msr & MSR_FP) {
649 		do_load_up_transact_fpu(&new->thread);
650 		new->thread.regs->msr |=
651 			(MSR_FP | new->thread.fpexc_mode);
652 	}
653 #ifdef CONFIG_ALTIVEC
654 	if (msr & MSR_VEC) {
655 		do_load_up_transact_altivec(&new->thread);
656 		new->thread.regs->msr |= MSR_VEC;
657 	}
658 #endif
659 	/* We may as well turn on VSX too since all the state is restored now */
660 	if (msr & MSR_VSX)
661 		new->thread.regs->msr |= MSR_VSX;
662 
663 	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
664 		 "(kernel msr 0x%lx)\n",
665 		 new->pid, mfmsr());
666 }
667 
668 static inline void __switch_to_tm(struct task_struct *prev)
669 {
670 	if (cpu_has_feature(CPU_FTR_TM)) {
671 		tm_enable();
672 		tm_reclaim_task(prev);
673 	}
674 }
675 
676 /*
677  * This is called if we are on the way out to userspace and the
678  * TIF_RESTORE_TM flag is set.  It checks if we need to reload
679  * FP and/or vector state and does so if necessary.
680  * If userspace is inside a transaction (whether active or
681  * suspended) and FP/VMX/VSX instructions have ever been enabled
682  * inside that transaction, then we have to keep them enabled
683  * and keep the FP/VMX/VSX state loaded for as long as the transaction
684  * continues.  The reason is that if we didn't, and subsequently
685  * got an FP/VMX/VSX unavailable interrupt inside a transaction,
686  * we don't know whether it's the same transaction, and thus we
687  * don't know which of the checkpointed state and the transactional
688  * state to use.
689  */
690 void restore_tm_state(struct pt_regs *regs)
691 {
692 	unsigned long msr_diff;
693 
694 	clear_thread_flag(TIF_RESTORE_TM);
695 	if (!MSR_TM_ACTIVE(regs->msr))
696 		return;
697 
698 	msr_diff = current->thread.tm_orig_msr & ~regs->msr;
699 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
700 	if (msr_diff & MSR_FP) {
701 		fp_enable();
702 		load_fp_state(&current->thread.fp_state);
703 		regs->msr |= current->thread.fpexc_mode;
704 	}
705 	if (msr_diff & MSR_VEC) {
706 		vec_enable();
707 		load_vr_state(&current->thread.vr_state);
708 	}
709 	regs->msr |= msr_diff;
710 }
711 
712 #else
713 #define tm_recheckpoint_new_task(new)
714 #define __switch_to_tm(prev)
715 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
716 
717 struct task_struct *__switch_to(struct task_struct *prev,
718 	struct task_struct *new)
719 {
720 	struct thread_struct *new_thread, *old_thread;
721 	struct task_struct *last;
722 #ifdef CONFIG_PPC_BOOK3S_64
723 	struct ppc64_tlb_batch *batch;
724 #endif
725 
726 	WARN_ON(!irqs_disabled());
727 
728 	/* Back up the TAR across context switches.
729 	 * Note that the TAR is not available for use in the kernel.  (To
730 	 * provide this, the TAR should be backed up/restored on exception
731 	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
732 	 * pt_regs anyway (for debug).)
733 	 * Save the TAR here before we do treclaim/trecheckpoint as these
734 	 * will change the TAR.
735 	 */
736 	save_tar(&prev->thread);
737 
738 	__switch_to_tm(prev);
739 
740 #ifdef CONFIG_SMP
741 	/* avoid complexity of lazy save/restore of fpu
742 	 * by just saving it every time we switch out if
743 	 * this task used the fpu during the last quantum.
744 	 *
745 	 * If it tries to use the fpu again, it'll trap and
746 	 * reload its fp regs.  So we don't have to do a restore
747 	 * every switch, just a save.
748 	 *  -- Cort
749 	 */
750 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
751 		giveup_fpu(prev);
752 #ifdef CONFIG_ALTIVEC
753 	/*
754 	 * If the previous thread used altivec in the last quantum
755 	 * (thus changing altivec regs) then save them.
756 	 * We used to check the VRSAVE register but not all apps
757 	 * set it, so we don't rely on it now (and in fact we need
758 	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
759 	 *
760 	 * On SMP we always save/restore altivec regs just to avoid the
761 	 * complexity of changing processors.
762 	 *  -- Cort
763 	 */
764 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
765 		giveup_altivec(prev);
766 #endif /* CONFIG_ALTIVEC */
767 #ifdef CONFIG_VSX
768 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
769 		/* VMX and FPU registers have already been saved above */
770 		__giveup_vsx(prev);
771 #endif /* CONFIG_VSX */
772 #ifdef CONFIG_SPE
773 	/*
774 	 * If the previous thread used spe in the last quantum
775 	 * (thus changing spe regs) then save them.
776 	 *
777 	 * On SMP we always save/restore spe regs just to avoid the
778 	 * complexity of changing processors.
779 	 */
780 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
781 		giveup_spe(prev);
782 #endif /* CONFIG_SPE */
783 
784 #else  /* CONFIG_SMP */
785 #ifdef CONFIG_ALTIVEC
786 	/* Avoid the trap.  On smp this never happens since
787 	 * we don't set last_task_used_altivec -- Cort
788 	 */
789 	if (new->thread.regs && last_task_used_altivec == new)
790 		new->thread.regs->msr |= MSR_VEC;
791 #endif /* CONFIG_ALTIVEC */
792 #ifdef CONFIG_VSX
793 	if (new->thread.regs && last_task_used_vsx == new)
794 		new->thread.regs->msr |= MSR_VSX;
795 #endif /* CONFIG_VSX */
796 #ifdef CONFIG_SPE
797 	/* Avoid the trap.  On smp this never happens since
798 	 * we don't set last_task_used_spe
799 	 */
800 	if (new->thread.regs && last_task_used_spe == new)
801 		new->thread.regs->msr |= MSR_SPE;
802 #endif /* CONFIG_SPE */
803 
804 #endif /* CONFIG_SMP */
805 
806 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
807 	switch_booke_debug_regs(&new->thread.debug);
808 #else
809 /*
810  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces, which take
811  * care of installing the DABR across context switches.
812  */
813 #ifndef CONFIG_HAVE_HW_BREAKPOINT
814 	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
815 		set_breakpoint(&new->thread.hw_brk);
816 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
817 #endif
818 
819 
820 	new_thread = &new->thread;
821 	old_thread = &current->thread;
822 
823 #ifdef CONFIG_PPC64
824 	/*
825 	 * Collect processor utilization data per process
826 	 */
827 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
828 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
829 		long unsigned start_tb, current_tb;
830 		start_tb = old_thread->start_tb;
831 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
832 		old_thread->accum_tb += (current_tb - start_tb);
833 		new_thread->start_tb = current_tb;
834 	}
835 #endif /* CONFIG_PPC64 */
836 
837 #ifdef CONFIG_PPC_BOOK3S_64
838 	batch = &__get_cpu_var(ppc64_tlb_batch);
839 	if (batch->active) {
840 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
841 		if (batch->index)
842 			__flush_tlb_pending(batch);
843 		batch->active = 0;
844 	}
845 #endif /* CONFIG_PPC_BOOK3S_64 */
846 
847 	/*
848 	 * We can't take a PMU exception inside _switch() since there is a
849 	 * window where the kernel stack SLB and the kernel stack are out
850 	 * of sync. Hard disable here.
851 	 */
852 	hard_irq_disable();
853 
854 	tm_recheckpoint_new_task(new);
855 
856 	last = _switch(old_thread, new_thread);
857 
858 #ifdef CONFIG_PPC_BOOK3S_64
859 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
860 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
861 		batch = &__get_cpu_var(ppc64_tlb_batch);
862 		batch->active = 1;
863 	}
864 #endif /* CONFIG_PPC_BOOK3S_64 */
865 
866 	return last;
867 }
868 
869 static int instructions_to_print = 16;
870 
871 static void show_instructions(struct pt_regs *regs)
872 {
873 	int i;
874 	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
875 			sizeof(int));
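
	/*
	 * With instructions_to_print == 16 the dump starts 12 instructions
	 * (3/4 of the window) before NIP, so the faulting instruction lands
	 * near the end of the dump, bracketed as <xxxxxxxx> below.
	 */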
876 
877 	printk("Instruction dump:");
878 
879 	for (i = 0; i < instructions_to_print; i++) {
880 		int instr;
881 
882 		if (!(i % 8))
883 			printk("\n");
884 
885 #if !defined(CONFIG_BOOKE)
886 		/* If executing with the IMMU off, adjust pc rather
887 		 * than print XXXXXXXX.
888 		 */
889 		if (!(regs->msr & MSR_IR))
890 			pc = (unsigned long)phys_to_virt(pc);
891 #endif
892 
893 		/* We use __get_user here *only* to avoid an OOPS on a
894 		 * bad address because the pc *should* only be a
895 		 * kernel address.
896 		 */
897 		if (!__kernel_text_address(pc) ||
898 		     __get_user(instr, (unsigned int __user *)pc)) {
899 			printk(KERN_CONT "XXXXXXXX ");
900 		} else {
901 			if (regs->nip == pc)
902 				printk(KERN_CONT "<%08x> ", instr);
903 			else
904 				printk(KERN_CONT "%08x ", instr);
905 		}
906 
907 		pc += sizeof(int);
908 	}
909 
910 	printk("\n");
911 }
912 
913 static struct regbit {
914 	unsigned long bit;
915 	const char *name;
916 } msr_bits[] = {
917 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
918 	{MSR_SF,	"SF"},
919 	{MSR_HV,	"HV"},
920 #endif
921 	{MSR_VEC,	"VEC"},
922 	{MSR_VSX,	"VSX"},
923 #ifdef CONFIG_BOOKE
924 	{MSR_CE,	"CE"},
925 #endif
926 	{MSR_EE,	"EE"},
927 	{MSR_PR,	"PR"},
928 	{MSR_FP,	"FP"},
929 	{MSR_ME,	"ME"},
930 #ifdef CONFIG_BOOKE
931 	{MSR_DE,	"DE"},
932 #else
933 	{MSR_SE,	"SE"},
934 	{MSR_BE,	"BE"},
935 #endif
936 	{MSR_IR,	"IR"},
937 	{MSR_DR,	"DR"},
938 	{MSR_PMM,	"PMM"},
939 #ifndef CONFIG_BOOKE
940 	{MSR_RI,	"RI"},
941 	{MSR_LE,	"LE"},
942 #endif
943 	{0,		NULL}
944 };
945 
946 static void printbits(unsigned long val, struct regbit *bits)
947 {
948 	const char *sep = "";
949 
950 	printk("<");
951 	for (; bits->bit; ++bits)
952 		if (val & bits->bit) {
953 			printk("%s%s", sep, bits->name);
954 			sep = ",";
955 		}
956 	printk(">");
957 }
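
/*
 * Example: an MSR value with EE, PR, FP, ME, IR and DR set is rendered by
 * show_regs() below as "<EE,PR,FP,ME,IR,DR>".
 */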
958 
959 #ifdef CONFIG_PPC64
960 #define REG		"%016lx"
961 #define REGS_PER_LINE	4
962 #define LAST_VOLATILE	13
963 #else
964 #define REG		"%08lx"
965 #define REGS_PER_LINE	8
966 #define LAST_VOLATILE	12
967 #endif
968 
969 void show_regs(struct pt_regs * regs)
970 {
971 	int i, trap;
972 
973 	show_regs_print_info(KERN_DEFAULT);
974 
975 	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
976 	       regs->nip, regs->link, regs->ctr);
977 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
978 	       regs, regs->trap, print_tainted(), init_utsname()->release);
979 	printk("MSR: "REG" ", regs->msr);
980 	printbits(regs->msr, msr_bits);
981 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
982 	trap = TRAP(regs);
983 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
984 		printk("CFAR: "REG" ", regs->orig_gpr3);
985 	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
986 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
987 		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
988 #else
989 		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
990 #endif
991 #ifdef CONFIG_PPC64
992 	printk("SOFTE: %ld ", regs->softe);
993 #endif
994 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
995 	if (MSR_TM_ACTIVE(regs->msr))
996 		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
997 #endif
998 
999 	for (i = 0;  i < 32;  i++) {
1000 		if ((i % REGS_PER_LINE) == 0)
1001 			printk("\nGPR%02d: ", i);
1002 		printk(REG " ", regs->gpr[i]);
1003 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
1004 			break;
1005 	}
1006 	printk("\n");
1007 #ifdef CONFIG_KALLSYMS
1008 	/*
1009 	 * Lookup NIP late so we have the best chance of getting the
1010 	 * above info out without failing
1011 	 */
1012 	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1013 	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1014 #endif
1015 	show_stack(current, (unsigned long *) regs->gpr[1]);
1016 	if (!user_mode(regs))
1017 		show_instructions(regs);
1018 }
1019 
1020 void exit_thread(void)
1021 {
1022 	discard_lazy_cpu_state();
1023 }
1024 
1025 void flush_thread(void)
1026 {
1027 	discard_lazy_cpu_state();
1028 
1029 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1030 	flush_ptrace_hw_breakpoint(current);
1031 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1032 	set_debug_reg_defaults(&current->thread);
1033 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1034 }
1035 
1036 void
1037 release_thread(struct task_struct *t)
1038 {
1039 }
1040 
1041 /*
1042  * this gets called so that we can store coprocessor state into memory and
1043  * copy the current task into the new thread.
1044  */
1045 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1046 {
1047 	flush_fp_to_thread(src);
1048 	flush_altivec_to_thread(src);
1049 	flush_vsx_to_thread(src);
1050 	flush_spe_to_thread(src);
1051 
1052 	*dst = *src;
1053 
1054 	clear_task_ebb(dst);
1055 
1056 	return 0;
1057 }
1058 
1059 /*
1060  * Copy a thread.
1061  */
1062 extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
1063 
1064 int copy_thread(unsigned long clone_flags, unsigned long usp,
1065 		unsigned long arg, struct task_struct *p)
1066 {
1067 	struct pt_regs *childregs, *kregs;
1068 	extern void ret_from_fork(void);
1069 	extern void ret_from_kernel_thread(void);
1070 	void (*f)(void);
1071 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1072 
1073 	/* Copy registers */
1074 	sp -= sizeof(struct pt_regs);
1075 	childregs = (struct pt_regs *) sp;
1076 	if (unlikely(p->flags & PF_KTHREAD)) {
1077 		struct thread_info *ti = (void *)task_stack_page(p);
1078 		memset(childregs, 0, sizeof(struct pt_regs));
1079 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
1080 		childregs->gpr[14] = usp;	/* function */
1081 #ifdef CONFIG_PPC64
1082 		clear_tsk_thread_flag(p, TIF_32BIT);
1083 		childregs->softe = 1;
1084 #endif
1085 		childregs->gpr[15] = arg;
1086 		p->thread.regs = NULL;	/* no user register state */
1087 		ti->flags |= _TIF_RESTOREALL;
1088 		f = ret_from_kernel_thread;
1089 	} else {
1090 		struct pt_regs *regs = current_pt_regs();
1091 		CHECK_FULL_REGS(regs);
1092 		*childregs = *regs;
1093 		if (usp)
1094 			childregs->gpr[1] = usp;
1095 		p->thread.regs = childregs;
1096 		childregs->gpr[3] = 0;  /* Result from fork() */
1097 		if (clone_flags & CLONE_SETTLS) {
1098 #ifdef CONFIG_PPC64
1099 			if (!is_32bit_task())
1100 				childregs->gpr[13] = childregs->gpr[6];
1101 			else
1102 #endif
1103 				childregs->gpr[2] = childregs->gpr[6];
1104 		}
1105 
1106 		f = ret_from_fork;
1107 	}
1108 	sp -= STACK_FRAME_OVERHEAD;
1109 
1110 	/*
1111 	 * The way this works is that at some point in the future
1112 	 * some task will call _switch to switch to the new task.
1113 	 * That will pop off the stack frame created below and start
1114 	 * the new task running at ret_from_fork.  The new task will
1115 	 * do some housekeeping and then return from the fork or clone
1116 	 * system call, using the stack frame created above.
1117 	 */
1118 	((unsigned long *)sp)[0] = 0;
1119 	sp -= sizeof(struct pt_regs);
1120 	kregs = (struct pt_regs *) sp;
1121 	sp -= STACK_FRAME_OVERHEAD;
1122 	p->thread.ksp = sp;
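
	/*
	 * From the top of the child's kernel stack downwards we now have:
	 * the childregs frame filled in above, a minimal stack frame whose
	 * back chain word was zeroed, the kregs frame (its nip is set to f
	 * at the end of this function), and one more minimal frame that
	 * p->thread.ksp points at for the first _switch() to this task.
	 */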
1123 #ifdef CONFIG_PPC32
1124 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1125 				_ALIGN_UP(sizeof(struct thread_info), 16);
1126 #endif
1127 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1128 	p->thread.ptrace_bps[0] = NULL;
1129 #endif
1130 
1131 	p->thread.fp_save_area = NULL;
1132 #ifdef CONFIG_ALTIVEC
1133 	p->thread.vr_save_area = NULL;
1134 #endif
1135 
1136 #ifdef CONFIG_PPC_STD_MMU_64
1137 	if (mmu_has_feature(MMU_FTR_SLB)) {
1138 		unsigned long sp_vsid;
1139 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1140 
1141 		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1142 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1143 				<< SLB_VSID_SHIFT_1T;
1144 		else
1145 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1146 				<< SLB_VSID_SHIFT;
1147 		sp_vsid |= SLB_VSID_KERNEL | llp;
1148 		p->thread.ksp_vsid = sp_vsid;
1149 	}
1150 #endif /* CONFIG_PPC_STD_MMU_64 */
1151 #ifdef CONFIG_PPC64
1152 	if (cpu_has_feature(CPU_FTR_DSCR)) {
1153 		p->thread.dscr_inherit = current->thread.dscr_inherit;
1154 		p->thread.dscr = current->thread.dscr;
1155 	}
1156 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
1157 		p->thread.ppr = INIT_PPR;
1158 #endif
1159 	/*
1160 	 * The PPC64 ABI makes use of function descriptors to represent
1161 	 * function pointers.  Here f (ret_from_fork or ret_from_kernel_thread)
1162 	 * actually points to such a descriptor, whose first entry is the
1163 	 * address of the real function.
1164 	 */
1165 #ifdef CONFIG_PPC64
1166 	kregs->nip = *((unsigned long *)f);
1167 #else
1168 	kregs->nip = (unsigned long)f;
1169 #endif
1170 	return 0;
1171 }
1172 
1173 /*
1174  * Set up a thread for executing a new program
1175  */
1176 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1177 {
1178 #ifdef CONFIG_PPC64
1179 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
1180 #endif
1181 
1182 	/*
1183 	 * If we exec out of a kernel thread then thread.regs will not be
1184 	 * set.  Do it now.
1185 	 */
1186 	if (!current->thread.regs) {
1187 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1188 		current->thread.regs = regs - 1;
1189 	}
1190 
1191 	memset(regs->gpr, 0, sizeof(regs->gpr));
1192 	regs->ctr = 0;
1193 	regs->link = 0;
1194 	regs->xer = 0;
1195 	regs->ccr = 0;
1196 	regs->gpr[1] = sp;
1197 
1198 	/*
1199 	 * We have just cleared all the nonvolatile GPRs, so make
1200 	 * FULL_REGS(regs) return true.  This is necessary to allow
1201 	 * ptrace to examine the thread immediately after exec.
1202 	 */
1203 	regs->trap &= ~1UL;
1204 
1205 #ifdef CONFIG_PPC32
1206 	regs->mq = 0;
1207 	regs->nip = start;
1208 	regs->msr = MSR_USER;
1209 #else
1210 	if (!is_32bit_task()) {
1211 		unsigned long entry;
1212 
1213 		if (is_elf2_task()) {
1214 			/* Look ma, no function descriptors! */
1215 			entry = start;
1216 
1217 			/*
1218 			 * Ulrich says:
1219 			 *   The latest iteration of the ABI requires that when
1220 			 *   calling a function (at its global entry point),
1221 			 *   the caller must ensure r12 holds the entry point
1222 			 *   address (so that the function can quickly
1223 			 *   establish addressability).
1224 			 */
1225 			regs->gpr[12] = start;
1226 			/* Make sure that's restored on entry to userspace. */
1227 			set_thread_flag(TIF_RESTOREALL);
1228 		} else {
1229 			unsigned long toc;
1230 
1231 			/* start is a relocated pointer to the function
1232 			 * descriptor for the elf _start routine.  The first
1233 			 * entry in the function descriptor is the entry
1234 			 * address of _start and the second entry is the TOC
1235 			 * value we need to use.
1236 			 */
1237 			__get_user(entry, (unsigned long __user *)start);
1238 			__get_user(toc, (unsigned long __user *)start+1);
1239 
1240 			/* Check whether the e_entry function descriptor entries
1241 			 * need to be relocated before we can use them.
1242 			 */
1243 			if (load_addr != 0) {
1244 				entry += load_addr;
1245 				toc   += load_addr;
1246 			}
1247 			regs->gpr[2] = toc;
1248 		}
1249 		regs->nip = entry;
1250 		regs->msr = MSR_USER64;
1251 	} else {
1252 		regs->nip = start;
1253 		regs->gpr[2] = 0;
1254 		regs->msr = MSR_USER32;
1255 	}
1256 #endif
1257 	discard_lazy_cpu_state();
1258 #ifdef CONFIG_VSX
1259 	current->thread.used_vsr = 0;
1260 #endif
1261 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1262 	current->thread.fp_save_area = NULL;
1263 #ifdef CONFIG_ALTIVEC
1264 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1265 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1266 	current->thread.vr_save_area = NULL;
1267 	current->thread.vrsave = 0;
1268 	current->thread.used_vr = 0;
1269 #endif /* CONFIG_ALTIVEC */
1270 #ifdef CONFIG_SPE
1271 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
1272 	current->thread.acc = 0;
1273 	current->thread.spefscr = 0;
1274 	current->thread.used_spe = 0;
1275 #endif /* CONFIG_SPE */
1276 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1277 	if (cpu_has_feature(CPU_FTR_TM))
1278 		regs->msr |= MSR_TM;
1279 	current->thread.tm_tfhar = 0;
1280 	current->thread.tm_texasr = 0;
1281 	current->thread.tm_tfiar = 0;
1282 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1283 }
1284 
1285 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1286 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
1287 
1288 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1289 {
1290 	struct pt_regs *regs = tsk->thread.regs;
1291 
1292 	/* This is a bit hairy.  If we are an SPE enabled processor
1293 	 * (have embedded fp) we store the IEEE exception enable flags in
1294 	 * fpexc_mode.  fpexc_mode is also used for setting the FP exception
1295 	 * mode (async, precise, disabled) for 'Classic' FP. */
1296 	if (val & PR_FP_EXC_SW_ENABLE) {
1297 #ifdef CONFIG_SPE
1298 		if (cpu_has_feature(CPU_FTR_SPE)) {
1299 			/*
1300 			 * When the sticky exception bits are set
1301 			 * directly by userspace, it must call prctl
1302 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1303 			 * in the existing prctl settings) or
1304 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1305 			 * the bits being set).  <fenv.h> functions
1306 			 * saving and restoring the whole
1307 			 * floating-point environment need to do so
1308 			 * anyway to restore the prctl settings from
1309 			 * the saved environment.
1310 			 */
1311 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1312 			tsk->thread.fpexc_mode = val &
1313 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1314 			return 0;
1315 		} else {
1316 			return -EINVAL;
1317 		}
1318 #else
1319 		return -EINVAL;
1320 #endif
1321 	}
1322 
1323 	/* On a CONFIG_SPE processor this does not hurt us.  The bits that
1324 	 * __pack_fe01 uses do not overlap with the bits used for
1325 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
1326 	 * on CONFIG_SPE implementations are reserved, so writing to
1327 	 * them does not change anything. */
1328 	if (val > PR_FP_EXC_PRECISE)
1329 		return -EINVAL;
1330 	tsk->thread.fpexc_mode = __pack_fe01(val);
1331 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
1332 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1333 			| tsk->thread.fpexc_mode;
1334 	return 0;
1335 }
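
/*
 * Userspace reaches this via prctl(2): for example,
 * prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) requests precise FP exception
 * mode, and get_fpexc_mode() below services the matching PR_GET_FPEXC.
 */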
1336 
1337 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1338 {
1339 	unsigned int val;
1340 
1341 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1342 #ifdef CONFIG_SPE
1343 		if (cpu_has_feature(CPU_FTR_SPE)) {
1344 			/*
1345 			 * When the sticky exception bits are set
1346 			 * directly by userspace, it must call prctl
1347 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1348 			 * in the existing prctl settings) or
1349 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1350 			 * the bits being set).  <fenv.h> functions
1351 			 * saving and restoring the whole
1352 			 * floating-point environment need to do so
1353 			 * anyway to restore the prctl settings from
1354 			 * the saved environment.
1355 			 */
1356 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1357 			val = tsk->thread.fpexc_mode;
1358 		} else
1359 			return -EINVAL;
1360 #else
1361 		return -EINVAL;
1362 #endif
1363 	else
1364 		val = __unpack_fe01(tsk->thread.fpexc_mode);
1365 	return put_user(val, (unsigned int __user *) adr);
1366 }
1367 
1368 int set_endian(struct task_struct *tsk, unsigned int val)
1369 {
1370 	struct pt_regs *regs = tsk->thread.regs;
1371 
1372 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1373 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1374 		return -EINVAL;
1375 
1376 	if (regs == NULL)
1377 		return -EINVAL;
1378 
1379 	if (val == PR_ENDIAN_BIG)
1380 		regs->msr &= ~MSR_LE;
1381 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1382 		regs->msr |= MSR_LE;
1383 	else
1384 		return -EINVAL;
1385 
1386 	return 0;
1387 }
1388 
1389 int get_endian(struct task_struct *tsk, unsigned long adr)
1390 {
1391 	struct pt_regs *regs = tsk->thread.regs;
1392 	unsigned int val;
1393 
1394 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1395 	    !cpu_has_feature(CPU_FTR_REAL_LE))
1396 		return -EINVAL;
1397 
1398 	if (regs == NULL)
1399 		return -EINVAL;
1400 
1401 	if (regs->msr & MSR_LE) {
1402 		if (cpu_has_feature(CPU_FTR_REAL_LE))
1403 			val = PR_ENDIAN_LITTLE;
1404 		else
1405 			val = PR_ENDIAN_PPC_LITTLE;
1406 	} else
1407 		val = PR_ENDIAN_BIG;
1408 
1409 	return put_user(val, (unsigned int __user *)adr);
1410 }
1411 
1412 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1413 {
1414 	tsk->thread.align_ctl = val;
1415 	return 0;
1416 }
1417 
1418 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1419 {
1420 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1421 }
1422 
1423 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1424 				  unsigned long nbytes)
1425 {
1426 	unsigned long stack_page;
1427 	unsigned long cpu = task_cpu(p);
1428 
1429 	/*
1430 	 * Avoid crashing if the stack has overflowed and corrupted
1431 	 * task_cpu(p), which is in the thread_info struct.
1432 	 */
1433 	if (cpu < NR_CPUS && cpu_possible(cpu)) {
1434 		stack_page = (unsigned long) hardirq_ctx[cpu];
1435 		if (sp >= stack_page + sizeof(struct thread_struct)
1436 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1437 			return 1;
1438 
1439 		stack_page = (unsigned long) softirq_ctx[cpu];
1440 		if (sp >= stack_page + sizeof(struct thread_struct)
1441 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1442 			return 1;
1443 	}
1444 	return 0;
1445 }
1446 
1447 int validate_sp(unsigned long sp, struct task_struct *p,
1448 		       unsigned long nbytes)
1449 {
1450 	unsigned long stack_page = (unsigned long)task_stack_page(p);
1451 
1452 	if (sp >= stack_page + sizeof(struct thread_struct)
1453 	    && sp <= stack_page + THREAD_SIZE - nbytes)
1454 		return 1;
1455 
1456 	return valid_irq_stack(sp, p, nbytes);
1457 }
1458 
1459 EXPORT_SYMBOL(validate_sp);
1460 
1461 unsigned long get_wchan(struct task_struct *p)
1462 {
1463 	unsigned long ip, sp;
1464 	int count = 0;
1465 
1466 	if (!p || p == current || p->state == TASK_RUNNING)
1467 		return 0;
1468 
1469 	sp = p->thread.ksp;
1470 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1471 		return 0;
1472 
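	/*
	 * Walk the stack back chain: the first word of each frame points to
	 * the caller's frame, and the caller's saved LR sits at offset
	 * STACK_FRAME_LR_SAVE.  The first saved LR that is not inside the
	 * scheduler is reported as the wchan.
	 */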
1473 	do {
1474 		sp = *(unsigned long *)sp;
1475 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1476 			return 0;
1477 		if (count > 0) {
1478 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1479 			if (!in_sched_functions(ip))
1480 				return ip;
1481 		}
1482 	} while (count++ < 16);
1483 	return 0;
1484 }
1485 
1486 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1487 
1488 void show_stack(struct task_struct *tsk, unsigned long *stack)
1489 {
1490 	unsigned long sp, ip, lr, newsp;
1491 	int count = 0;
1492 	int firstframe = 1;
1493 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1494 	int curr_frame = current->curr_ret_stack;
1495 	extern void return_to_handler(void);
1496 	unsigned long rth = (unsigned long)return_to_handler;
1497 	unsigned long mrth = -1;
1498 #ifdef CONFIG_PPC64
1499 	extern void mod_return_to_handler(void);
1500 	rth = *(unsigned long *)rth;
1501 	mrth = (unsigned long)mod_return_to_handler;
1502 	mrth = *(unsigned long *)mrth;
1503 #endif
1504 #endif
1505 
1506 	sp = (unsigned long) stack;
1507 	if (tsk == NULL)
1508 		tsk = current;
1509 	if (sp == 0) {
1510 		if (tsk == current)
1511 			asm("mr %0,1" : "=r" (sp));
1512 		else
1513 			sp = tsk->thread.ksp;
1514 	}
1515 
1516 	lr = 0;
1517 	printk("Call Trace:\n");
1518 	do {
1519 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1520 			return;
1521 
1522 		stack = (unsigned long *) sp;
1523 		newsp = stack[0];
1524 		ip = stack[STACK_FRAME_LR_SAVE];
1525 		if (!firstframe || ip != lr) {
1526 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1527 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1528 			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
1529 				printk(" (%pS)",
1530 				       (void *)current->ret_stack[curr_frame].ret);
1531 				curr_frame--;
1532 			}
1533 #endif
1534 			if (firstframe)
1535 				printk(" (unreliable)");
1536 			printk("\n");
1537 		}
1538 		firstframe = 0;
1539 
1540 		/*
1541 		 * See if this is an exception frame.
1542 		 * We look for the "regshere" marker in the current frame.
1543 		 */
1544 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1545 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1546 			struct pt_regs *regs = (struct pt_regs *)
1547 				(sp + STACK_FRAME_OVERHEAD);
1548 			lr = regs->link;
1549 			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
1550 			       regs->trap, (void *)regs->nip, (void *)lr);
1551 			firstframe = 1;
1552 		}
1553 
1554 		sp = newsp;
1555 	} while (count++ < kstack_depth_to_print);
1556 }
1557 
1558 #ifdef CONFIG_PPC64
1559 /* Called with hard IRQs off */
1560 void notrace __ppc64_runlatch_on(void)
1561 {
1562 	struct thread_info *ti = current_thread_info();
1563 	unsigned long ctrl;
1564 
1565 	ctrl = mfspr(SPRN_CTRLF);
1566 	ctrl |= CTRL_RUNLATCH;
1567 	mtspr(SPRN_CTRLT, ctrl);
1568 
1569 	ti->local_flags |= _TLF_RUNLATCH;
1570 }
1571 
1572 /* Called with hard IRQs off */
1573 void notrace __ppc64_runlatch_off(void)
1574 {
1575 	struct thread_info *ti = current_thread_info();
1576 	unsigned long ctrl;
1577 
1578 	ti->local_flags &= ~_TLF_RUNLATCH;
1579 
1580 	ctrl = mfspr(SPRN_CTRLF);
1581 	ctrl &= ~CTRL_RUNLATCH;
1582 	mtspr(SPRN_CTRLT, ctrl);
1583 }
1584 #endif /* CONFIG_PPC64 */
1585 
1586 unsigned long arch_align_stack(unsigned long sp)
1587 {
1588 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1589 		sp -= get_random_int() & ~PAGE_MASK;
1590 	return sp & ~0xf;
1591 }
1592 
1593 static inline unsigned long brk_rnd(void)
1594 {
1595 	unsigned long rnd = 0;
1596 
1597 	/* 8MB for 32bit, 1GB for 64bit */
1598 	if (is_32bit_task())
1599 		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1600 	else
1601 		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1602 
1603 	return rnd << PAGE_SHIFT;
1604 }
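
/*
 * With a 4K page size (PAGE_SHIFT == 12) this draws from 2^11 pages, i.e.
 * up to 8MB, for 32-bit tasks and from 2^18 pages, i.e. up to 1GB, for
 * 64-bit tasks, matching the comment above.
 */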
1605 
1606 unsigned long arch_randomize_brk(struct mm_struct *mm)
1607 {
1608 	unsigned long base = mm->brk;
1609 	unsigned long ret;
1610 
1611 #ifdef CONFIG_PPC_STD_MMU_64
1612 	/*
1613 	 * If we are using 1TB segments and we are allowed to randomise
1614 	 * the heap, we can put it above 1TB so it is backed by a 1TB
1615 	 * segment. Otherwise the heap will be in the bottom 1TB
1616 	 * which always uses 256MB segments and this may result in a
1617 	 * performance penalty.
1618 	 */
1619 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1620 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1621 #endif
1622 
1623 	ret = PAGE_ALIGN(base + brk_rnd());
1624 
1625 	if (ret < mm->brk)
1626 		return mm->brk;
1627 
1628 	return ret;
1629 }
1630 
1631 unsigned long randomize_et_dyn(unsigned long base)
1632 {
1633 	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1634 
1635 	if (ret < base)
1636 		return base;
1637 
1638 	return ret;
1639 }
1640