xref: /openbmc/linux/arch/powerpc/kernel/process.c (revision f35e839a)
1 /*
2  *  Derived from "arch/i386/kernel/process.c"
3  *    Copyright (C) 1995  Linus Torvalds
4  *
5  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6  *  Paul Mackerras (paulus@cs.anu.edu.au)
7  *
8  *  PowerPC version
9  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License
13  *  as published by the Free Software Foundation; either version
14  *  2 of the License, or (at your option) any later version.
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/elf.h>
28 #include <linux/init.h>
29 #include <linux/prctl.h>
30 #include <linux/init_task.h>
31 #include <linux/export.h>
32 #include <linux/kallsyms.h>
33 #include <linux/mqueue.h>
34 #include <linux/hardirq.h>
35 #include <linux/utsname.h>
36 #include <linux/ftrace.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/personality.h>
39 #include <linux/random.h>
40 #include <linux/hw_breakpoint.h>
41 
42 #include <asm/pgtable.h>
43 #include <asm/uaccess.h>
44 #include <asm/io.h>
45 #include <asm/processor.h>
46 #include <asm/mmu.h>
47 #include <asm/prom.h>
48 #include <asm/machdep.h>
49 #include <asm/time.h>
50 #include <asm/runlatch.h>
51 #include <asm/syscalls.h>
52 #include <asm/switch_to.h>
53 #include <asm/tm.h>
54 #include <asm/debug.h>
55 #ifdef CONFIG_PPC64
56 #include <asm/firmware.h>
57 #endif
58 #include <linux/kprobes.h>
59 #include <linux/kdebug.h>
60 
61 /* Transactional Memory debug */
62 #ifdef TM_DEBUG_SW
63 #define TM_DEBUG(x...) printk(KERN_INFO x)
64 #else
65 #define TM_DEBUG(x...) do { } while (0)
66 #endif
67 
68 extern unsigned long _get_SP(void);
69 
70 #ifndef CONFIG_SMP
71 struct task_struct *last_task_used_math = NULL;
72 struct task_struct *last_task_used_altivec = NULL;
73 struct task_struct *last_task_used_vsx = NULL;
74 struct task_struct *last_task_used_spe = NULL;
75 #endif
76 
77 /*
78  * Make sure the floating-point register state in the
79  * thread_struct is up to date for task tsk.
80  */
81 void flush_fp_to_thread(struct task_struct *tsk)
82 {
83 	if (tsk->thread.regs) {
84 		/*
85 		 * We need to disable preemption here because if we didn't,
86 		 * another process could get scheduled after the regs->msr
87 		 * test but before we have finished saving the FP registers
88 		 * to the thread_struct.  That process could take over the
89 		 * FPU, and then when we get scheduled again we would store
90 		 * bogus values for the remaining FP registers.
91 		 */
92 		preempt_disable();
93 		if (tsk->thread.regs->msr & MSR_FP) {
94 #ifdef CONFIG_SMP
95 			/*
96 			 * This should only ever be called for current or
97 			 * for a stopped child process.  Since we save away
98 			 * the FP register state on context switch on SMP,
99 			 * there is something wrong if a stopped child appears
100 			 * to still have its FP state in the CPU registers.
101 			 */
102 			BUG_ON(tsk != current);
103 #endif
104 			giveup_fpu(tsk);
105 		}
106 		preempt_enable();
107 	}
108 }
109 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
110 
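/*
 * Editor's note: a minimal usage sketch (not part of this file).  Any
 * code reading FP state out of the thread_struct, e.g. a ptrace
 * register fetch, must call flush_fp_to_thread() first so the live
 * FPU contents are written back.  read_fp_regs() is a hypothetical
 * helper; the fpr[][] layout is the one this kernel revision uses.
 */
#if 0	/* illustrative sketch, not built */
static void read_fp_regs(struct task_struct *tsk, double *out)
{
	int i;

	flush_fp_to_thread(tsk);	/* sync live FPU -> thread_struct */
	for (i = 0; i < 32; i++)
		out[i] = tsk->thread.fpr[i][0];
}
#endif
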
111 void enable_kernel_fp(void)
112 {
113 	WARN_ON(preemptible());
114 
115 #ifdef CONFIG_SMP
116 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
117 		giveup_fpu(current);
118 	else
119 		giveup_fpu(NULL);	/* just enables FP for kernel */
120 #else
121 	giveup_fpu(last_task_used_math);
122 #endif /* CONFIG_SMP */
123 }
124 EXPORT_SYMBOL(enable_kernel_fp);
125 
126 #ifdef CONFIG_ALTIVEC
127 void enable_kernel_altivec(void)
128 {
129 	WARN_ON(preemptible());
130 
131 #ifdef CONFIG_SMP
132 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
133 		giveup_altivec(current);
134 	else
135 		giveup_altivec_notask();
136 #else
137 	giveup_altivec(last_task_used_altivec);
138 #endif /* CONFIG_SMP */
139 }
140 EXPORT_SYMBOL(enable_kernel_altivec);
141 
142 /*
143  * Make sure the VMX/Altivec register state in the
144  * thread_struct is up to date for task tsk.
145  */
146 void flush_altivec_to_thread(struct task_struct *tsk)
147 {
148 	if (tsk->thread.regs) {
149 		preempt_disable();
150 		if (tsk->thread.regs->msr & MSR_VEC) {
151 #ifdef CONFIG_SMP
152 			BUG_ON(tsk != current);
153 #endif
154 			giveup_altivec(tsk);
155 		}
156 		preempt_enable();
157 	}
158 }
159 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
160 #endif /* CONFIG_ALTIVEC */
161 
162 #ifdef CONFIG_VSX
163 #if 0
164 /* not currently used, but some crazy RAID module might want to later */
165 void enable_kernel_vsx(void)
166 {
167 	WARN_ON(preemptible());
168 
169 #ifdef CONFIG_SMP
170 	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
171 		giveup_vsx(current);
172 	else
173 		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
174 #else
175 	giveup_vsx(last_task_used_vsx);
176 #endif /* CONFIG_SMP */
177 }
178 EXPORT_SYMBOL(enable_kernel_vsx);
179 #endif
180 
181 void giveup_vsx(struct task_struct *tsk)
182 {
183 	giveup_fpu(tsk);
184 	giveup_altivec(tsk);
185 	__giveup_vsx(tsk);
186 }
187 
188 void flush_vsx_to_thread(struct task_struct *tsk)
189 {
190 	if (tsk->thread.regs) {
191 		preempt_disable();
192 		if (tsk->thread.regs->msr & MSR_VSX) {
193 #ifdef CONFIG_SMP
194 			BUG_ON(tsk != current);
195 #endif
196 			giveup_vsx(tsk);
197 		}
198 		preempt_enable();
199 	}
200 }
201 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
202 #endif /* CONFIG_VSX */
203 
204 #ifdef CONFIG_SPE
205 
206 void enable_kernel_spe(void)
207 {
208 	WARN_ON(preemptible());
209 
210 #ifdef CONFIG_SMP
211 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
212 		giveup_spe(current);
213 	else
214 		giveup_spe(NULL);	/* just enable SPE for kernel - force */
215 #else
216 	giveup_spe(last_task_used_spe);
217 #endif /* CONFIG_SMP */
218 }
219 EXPORT_SYMBOL(enable_kernel_spe);
220 
221 void flush_spe_to_thread(struct task_struct *tsk)
222 {
223 	if (tsk->thread.regs) {
224 		preempt_disable();
225 		if (tsk->thread.regs->msr & MSR_SPE) {
226 #ifdef CONFIG_SMP
227 			BUG_ON(tsk != current);
228 #endif
229 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
230 			giveup_spe(tsk);
231 		}
232 		preempt_enable();
233 	}
234 }
235 #endif /* CONFIG_SPE */
236 
237 #ifndef CONFIG_SMP
238 /*
239  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
240  * and the current task has some state, discard it.
241  */
242 void discard_lazy_cpu_state(void)
243 {
244 	preempt_disable();
245 	if (last_task_used_math == current)
246 		last_task_used_math = NULL;
247 #ifdef CONFIG_ALTIVEC
248 	if (last_task_used_altivec == current)
249 		last_task_used_altivec = NULL;
250 #endif /* CONFIG_ALTIVEC */
251 #ifdef CONFIG_VSX
252 	if (last_task_used_vsx == current)
253 		last_task_used_vsx = NULL;
254 #endif /* CONFIG_VSX */
255 #ifdef CONFIG_SPE
256 	if (last_task_used_spe == current)
257 		last_task_used_spe = NULL;
258 #endif
259 	preempt_enable();
260 }
261 #endif /* CONFIG_SMP */
262 
263 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
264 void do_send_trap(struct pt_regs *regs, unsigned long address,
265 		  unsigned long error_code, int signal_code, int breakpt)
266 {
267 	siginfo_t info;
268 
269 	current->thread.trap_nr = signal_code;
270 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
271 			11, SIGSEGV) == NOTIFY_STOP)
272 		return;
273 
274 	/* Deliver the signal to userspace */
275 	info.si_signo = SIGTRAP;
276 	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
277 	info.si_code = signal_code;
278 	info.si_addr = (void __user *)address;
279 	force_sig_info(SIGTRAP, &info, current);
280 }
281 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
282 void do_break (struct pt_regs *regs, unsigned long address,
283 		    unsigned long error_code)
284 {
285 	siginfo_t info;
286 
287 	current->thread.trap_nr = TRAP_HWBKPT;
288 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
289 			11, SIGSEGV) == NOTIFY_STOP)
290 		return;
291 
292 	if (debugger_break_match(regs))
293 		return;
294 
295 	/* Clear the breakpoint */
296 	hw_breakpoint_disable();
297 
298 	/* Deliver the signal to userspace */
299 	info.si_signo = SIGTRAP;
300 	info.si_errno = 0;
301 	info.si_code = TRAP_HWBKPT;
302 	info.si_addr = (void __user *)address;
303 	force_sig_info(SIGTRAP, &info, current);
304 }
305 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
306 
307 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
308 
309 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
310 /*
311  * Set the debug registers back to their default "safe" values.
312  */
313 static void set_debug_reg_defaults(struct thread_struct *thread)
314 {
315 	thread->iac1 = thread->iac2 = 0;
316 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
317 	thread->iac3 = thread->iac4 = 0;
318 #endif
319 	thread->dac1 = thread->dac2 = 0;
320 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
321 	thread->dvc1 = thread->dvc2 = 0;
322 #endif
323 	thread->dbcr0 = 0;
324 #ifdef CONFIG_BOOKE
325 	/*
326 	 * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1)
327 	 */
328 	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |	\
329 			DBCR1_IAC3US | DBCR1_IAC4US;
330 	/*
331 	 * Force Data Address Compare User/Supervisor bits to be User-only
332 	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
333 	 */
334 	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
335 #else
336 	thread->dbcr1 = 0;
337 #endif
338 }
339 
340 static void prime_debug_regs(struct thread_struct *thread)
341 {
342 	mtspr(SPRN_IAC1, thread->iac1);
343 	mtspr(SPRN_IAC2, thread->iac2);
344 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
345 	mtspr(SPRN_IAC3, thread->iac3);
346 	mtspr(SPRN_IAC4, thread->iac4);
347 #endif
348 	mtspr(SPRN_DAC1, thread->dac1);
349 	mtspr(SPRN_DAC2, thread->dac2);
350 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
351 	mtspr(SPRN_DVC1, thread->dvc1);
352 	mtspr(SPRN_DVC2, thread->dvc2);
353 #endif
354 	mtspr(SPRN_DBCR0, thread->dbcr0);
355 	mtspr(SPRN_DBCR1, thread->dbcr1);
356 #ifdef CONFIG_BOOKE
357 	mtspr(SPRN_DBCR2, thread->dbcr2);
358 #endif
359 }
360 /*
361  * If either the old or the new thread is making use of the
362  * debug registers, set the debug registers from the values
363  * stored in the new thread.
364  */
365 static void switch_booke_debug_regs(struct thread_struct *new_thread)
366 {
367 	if ((current->thread.dbcr0 & DBCR0_IDM)
368 		|| (new_thread->dbcr0 & DBCR0_IDM))
369 			prime_debug_regs(new_thread);
370 }
371 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
372 #ifndef CONFIG_HAVE_HW_BREAKPOINT
373 static void set_debug_reg_defaults(struct thread_struct *thread)
374 {
375 	thread->hw_brk.address = 0;
376 	thread->hw_brk.type = 0;
377 	set_breakpoint(&thread->hw_brk);
378 }
379 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
380 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
381 
382 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
383 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
384 {
385 	mtspr(SPRN_DAC1, dabr);
386 #ifdef CONFIG_PPC_47x
387 	isync();
388 #endif
389 	return 0;
390 }
391 #elif defined(CONFIG_PPC_BOOK3S)
392 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
393 {
394 	mtspr(SPRN_DABR, dabr);
395 	mtspr(SPRN_DABRX, dabrx);
396 	return 0;
397 }
398 #else
399 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
400 {
401 	return -EINVAL;
402 }
403 #endif
404 
405 static inline int set_dabr(struct arch_hw_breakpoint *brk)
406 {
407 	unsigned long dabr, dabrx;
408 
409 	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
410 	dabrx = ((brk->type >> 3) & 0x7);
411 
412 	if (ppc_md.set_dabr)
413 		return ppc_md.set_dabr(dabr, dabrx);
414 
415 	return __set_dabr(dabr, dabrx);
416 }
417 
418 static inline int set_dawr(struct arch_hw_breakpoint *brk)
419 {
420 	unsigned long dawr, dawrx, mrd;
421 
422 	dawr = brk->address;
423 
424 	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
425 		                   << (63 - 58); /* read/write bits */
426 	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
427 		                   << (63 - 59); /* translate */
428 	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
429 		                   >> 3; /* PRIV bits */
430 	/* dawr length is stored in field MRD, bits 48:53.  Matches range in
431 	   doublewords (64 bits), biased by -1, e.g. 0b000000=1DW and
432 	   0b111111=64DW.
433 	   brk->len is in bytes.
434 	   This aligns up to double word size, shifts and applies the bias.
435 	*/
436 	mrd = ((brk->len + 7) >> 3) - 1;
437 	dawrx |= (mrd & 0x3f) << (63 - 53);
438 
439 	if (ppc_md.set_dawr)
440 		return ppc_md.set_dawr(dawr, dawrx);
441 	mtspr(SPRN_DAWR, dawr);
442 	mtspr(SPRN_DAWRX, dawrx);
443 	return 0;
444 }
445 
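/*
 * Editor's note: a worked example (not part of this file) of the MRD
 * length encoding computed above.  For brk->len = 17 bytes:
 * (17 + 7) >> 3 = 3 doublewords rounded up, minus the bias of 1,
 * gives mrd = 2, i.e. 0b000010 meaning "3 doublewords" in DAWRX.
 */
#if 0	/* illustrative sketch, not built */
static unsigned long dawr_mrd(unsigned long len_bytes)
{
	/* round up to doublewords, then apply the -1 bias */
	return (((len_bytes + 7) >> 3) - 1) & 0x3f;
}
/* dawr_mrd(1) == 0, dawr_mrd(8) == 0, dawr_mrd(17) == 2, dawr_mrd(512) == 63 */
#endif
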
446 int set_breakpoint(struct arch_hw_breakpoint *brk)
447 {
448 	__get_cpu_var(current_brk) = *brk;
449 
450 	if (cpu_has_feature(CPU_FTR_DAWR))
451 		return set_dawr(brk);
452 
453 	return set_dabr(brk);
454 }
455 
456 #ifdef CONFIG_PPC64
457 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
458 #endif
459 
460 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
461 			      struct arch_hw_breakpoint *b)
462 {
463 	if (a->address != b->address)
464 		return false;
465 	if (a->type != b->type)
466 		return false;
467 	if (a->len != b->len)
468 		return false;
469 	return true;
470 }
471 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
472 static inline void tm_reclaim_task(struct task_struct *tsk)
473 {
474 	/* We have to work out if we're switching from/to a task that's in the
475 	 * middle of a transaction.
476 	 *
477 	 * In switching we need to maintain a 2nd register state as
478 	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
479 	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
480 	 * (current) FPRs into oldtask->thread.transact_fpr[].
481 	 *
482 	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
483 	 */
484 	struct thread_struct *thr = &tsk->thread;
485 
486 	if (!thr->regs)
487 		return;
488 
489 	if (!MSR_TM_ACTIVE(thr->regs->msr))
490 		goto out_and_saveregs;
491 
492 	/* Stash the original thread MSR, as giveup_fpu et al will
493 	 * modify it.  We hold onto it to see whether the task used
494 	 * FP & vector regs.
495 	 */
496 	thr->tm_orig_msr = thr->regs->msr;
497 
498 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
499 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
500 		 tsk->pid, thr->regs->nip,
501 		 thr->regs->ccr, thr->regs->msr,
502 		 thr->regs->trap);
503 
504 	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);
505 
506 	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
507 		 tsk->pid);
508 
509 out_and_saveregs:
510 	/* Always save the regs here, even if a transaction's not active.
511 	 * This context-switches a thread's TM info SPRs.  We do it here to
512 	 * be consistent with the restore path (in recheckpoint) which
513 	 * cannot happen later in _switch().
514 	 */
515 	tm_save_sprs(thr);
516 }
517 
518 static inline void tm_recheckpoint_new_task(struct task_struct *new)
519 {
520 	unsigned long msr;
521 
522 	if (!cpu_has_feature(CPU_FTR_TM))
523 		return;
524 
525 	/* Recheckpoint the registers of the thread we're about to switch to.
526 	 *
527 	 * If the task was using FP, we non-lazily reload both the original and
528 	 * the speculative FP register states.  This is because the kernel
529 	 * doesn't see if/when a TM rollback occurs, so if we take an FP
530 	 * unavoidable later, we are unable to determine which set of FP regs
531 	 * unavailable exception later, we are unable to determine which set of FP regs
532 	 */
533 	if (!new->thread.regs)
534 		return;
535 
536 	/* The TM SPRs are restored here, so that TEXASR.FS can be set
537 	 * before the trecheckpoint and no explosion occurs.
538 	 */
539 	tm_restore_sprs(&new->thread);
540 
541 	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
542 		return;
543 	msr = new->thread.tm_orig_msr;
544 	/* Recheckpoint to restore original checkpointed register state. */
545 	TM_DEBUG("*** tm_recheckpoint of pid %d "
546 		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
547 		 new->pid, new->thread.regs->msr, msr);
548 
549 	/* This loads the checkpointed FP/VEC state, if used */
550 	tm_recheckpoint(&new->thread, msr);
551 
552 	/* This loads the speculative FP/VEC state, if used */
553 	if (msr & MSR_FP) {
554 		do_load_up_transact_fpu(&new->thread);
555 		new->thread.regs->msr |=
556 			(MSR_FP | new->thread.fpexc_mode);
557 	}
558 #ifdef CONFIG_ALTIVEC
559 	if (msr & MSR_VEC) {
560 		do_load_up_transact_altivec(&new->thread);
561 		new->thread.regs->msr |= MSR_VEC;
562 	}
563 #endif
564 	/* We may as well turn on VSX too since all the state is restored now */
565 	if (msr & MSR_VSX)
566 		new->thread.regs->msr |= MSR_VSX;
567 
568 	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
569 		 "(kernel msr 0x%lx)\n",
570 		 new->pid, mfmsr());
571 }
572 
573 static inline void __switch_to_tm(struct task_struct *prev)
574 {
575 	if (cpu_has_feature(CPU_FTR_TM)) {
576 		tm_enable();
577 		tm_reclaim_task(prev);
578 	}
579 }
580 #else
581 #define tm_recheckpoint_new_task(new)
582 #define __switch_to_tm(prev)
583 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
584 
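/*
 * Editor's note: an illustrative userspace sketch (not part of this
 * file) of why the reclaim/recheckpoint dance above is needed.  A
 * hardware transaction is aborted whenever the kernel switches the
 * task out; tm_reclaim_task() saves the checkpointed state so the
 * rollback-and-retry below still works.  Assumes a POWER8+ CPU and
 * gcc -mhtm; the builtins are GCC's HTM builtins.
 */
#if 0	/* illustrative sketch, not built */
static long transactional_add(long *counter)
{
	if (__builtin_tbegin(0)) {	/* non-zero: transaction started */
		(*counter)++;		/* speculative until tend */
		__builtin_tend(0);
		return 0;
	}
	/* Aborted, e.g. by a context switch: caller falls back to a lock. */
	return -1;
}
#endif
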
585 struct task_struct *__switch_to(struct task_struct *prev,
586 	struct task_struct *new)
587 {
588 	struct thread_struct *new_thread, *old_thread;
589 	unsigned long flags;
590 	struct task_struct *last;
591 #ifdef CONFIG_PPC_BOOK3S_64
592 	struct ppc64_tlb_batch *batch;
593 #endif
594 
595 	__switch_to_tm(prev);
596 
597 #ifdef CONFIG_SMP
598 	/* avoid complexity of lazy save/restore of fpu
599 	 * by just saving it every time we switch out if
600 	 * this task used the fpu during the last quantum.
601 	 *
602 	 * If it tries to use the fpu again, it'll trap and
603 	 * reload its fp regs.  So we don't have to do a restore
604 	 * every switch, just a save.
605 	 *  -- Cort
606 	 */
607 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
608 		giveup_fpu(prev);
609 #ifdef CONFIG_ALTIVEC
610 	/*
611 	 * If the previous thread used altivec in the last quantum
612 	 * (thus changing altivec regs) then save them.
613 	 * We used to check the VRSAVE register but not all apps
614 	 * set it, so we don't rely on it now (and in fact we need
615 	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
616 	 *
617 	 * On SMP we always save/restore altivec regs just to avoid the
618 	 * complexity of changing processors.
619 	 *  -- Cort
620 	 */
621 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
622 		giveup_altivec(prev);
623 #endif /* CONFIG_ALTIVEC */
624 #ifdef CONFIG_VSX
625 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
626 		/* VMX and FPU registers are already saved here */
627 		__giveup_vsx(prev);
628 #endif /* CONFIG_VSX */
629 #ifdef CONFIG_SPE
630 	/*
631 	 * If the previous thread used spe in the last quantum
632 	 * (thus changing spe regs) then save them.
633 	 *
634 	 * On SMP we always save/restore spe regs just to avoid the
635 	 * complexity of changing processors.
636 	 */
637 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
638 		giveup_spe(prev);
639 #endif /* CONFIG_SPE */
640 
641 #else  /* CONFIG_SMP */
642 #ifdef CONFIG_ALTIVEC
643 	/* Avoid the trap.  On SMP this never happens since
644 	 * we don't set last_task_used_altivec -- Cort
645 	 */
646 	if (new->thread.regs && last_task_used_altivec == new)
647 		new->thread.regs->msr |= MSR_VEC;
648 #endif /* CONFIG_ALTIVEC */
649 #ifdef CONFIG_VSX
650 	if (new->thread.regs && last_task_used_vsx == new)
651 		new->thread.regs->msr |= MSR_VSX;
652 #endif /* CONFIG_VSX */
653 #ifdef CONFIG_SPE
654 	/* Avoid the trap.  On SMP this never happens since
655 	 * we don't set last_task_used_spe
656 	 */
657 	if (new->thread.regs && last_task_used_spe == new)
658 		new->thread.regs->msr |= MSR_SPE;
659 #endif /* CONFIG_SPE */
660 
661 #endif /* CONFIG_SMP */
662 
663 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
664 	switch_booke_debug_regs(&new->thread);
665 #else
666 /*
667  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
668  * schedule the DABR.
669  */
670 #ifndef CONFIG_HAVE_HW_BREAKPOINT
671 	if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
672 		set_breakpoint(&new->thread.hw_brk);
673 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
674 #endif
675 
676 
677 	new_thread = &new->thread;
678 	old_thread = &current->thread;
679 
680 #ifdef CONFIG_PPC64
681 	/*
682 	 * Collect processor utilization data per process
683 	 */
684 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
685 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
686 		long unsigned start_tb, current_tb;
687 		start_tb = old_thread->start_tb;
688 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
689 		old_thread->accum_tb += (current_tb - start_tb);
690 		new_thread->start_tb = current_tb;
691 	}
692 #endif /* CONFIG_PPC64 */
693 
694 #ifdef CONFIG_PPC_BOOK3S_64
695 	batch = &__get_cpu_var(ppc64_tlb_batch);
696 	if (batch->active) {
697 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
698 		if (batch->index)
699 			__flush_tlb_pending(batch);
700 		batch->active = 0;
701 	}
702 #endif /* CONFIG_PPC_BOOK3S_64 */
703 
704 	local_irq_save(flags);
705 
706 	/*
707 	 * We can't take a PMU exception inside _switch() since there is a
708 	 * window where the kernel stack SLB and the kernel stack are out
709 	 * of sync. Hard disable here.
710 	 */
711 	hard_irq_disable();
712 
713 	tm_recheckpoint_new_task(new);
714 
715 	last = _switch(old_thread, new_thread);
716 
717 #ifdef CONFIG_PPC_BOOK3S_64
718 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
719 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
720 		batch = &__get_cpu_var(ppc64_tlb_batch);
721 		batch->active = 1;
722 	}
723 #endif /* CONFIG_PPC_BOOK3S_64 */
724 
725 	local_irq_restore(flags);
726 
727 	return last;
728 }
729 
730 static int instructions_to_print = 16;
731 
732 static void show_instructions(struct pt_regs *regs)
733 {
734 	int i;
735 	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
736 			sizeof(int));
737 
738 	printk("Instruction dump:");
739 
740 	for (i = 0; i < instructions_to_print; i++) {
741 		int instr;
742 
743 		if (!(i % 8))
744 			printk("\n");
745 
746 #if !defined(CONFIG_BOOKE)
747 		/* If executing with the IMMU off, adjust pc rather
748 		 * than print XXXXXXXX.
749 		 */
750 		if (!(regs->msr & MSR_IR))
751 			pc = (unsigned long)phys_to_virt(pc);
752 #endif
753 
754 		/* We use __get_user here *only* to avoid an OOPS on a
755 		 * bad address because the pc *should* only be a
756 		 * kernel address.
757 		 */
758 		if (!__kernel_text_address(pc) ||
759 		     __get_user(instr, (unsigned int __user *)pc)) {
760 			printk(KERN_CONT "XXXXXXXX ");
761 		} else {
762 			if (regs->nip == pc)
763 				printk(KERN_CONT "<%08x> ", instr);
764 			else
765 				printk(KERN_CONT "%08x ", instr);
766 		}
767 
768 		pc += sizeof(int);
769 	}
770 
771 	printk("\n");
772 }
773 
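/*
 * Editor's note: a worked example (not part of this file) of the dump
 * window chosen in show_instructions().  With instructions_to_print =
 * 16, the start pc is nip - 16*3/4 * 4 = nip - 48 bytes, so the dump
 * shows the 12 instructions before the faulting one and 3 after it,
 * with the instruction at nip printed in <angle brackets>.
 */
#if 0	/* illustrative sketch, not built */
static unsigned long dump_window_start(unsigned long nip, int count)
{
	return nip - (count * 3 / 4 * sizeof(int));	/* 16 -> nip - 48 */
}
#endif
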
774 static struct regbit {
775 	unsigned long bit;
776 	const char *name;
777 } msr_bits[] = {
778 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
779 	{MSR_SF,	"SF"},
780 	{MSR_HV,	"HV"},
781 #endif
782 	{MSR_VEC,	"VEC"},
783 	{MSR_VSX,	"VSX"},
784 #ifdef CONFIG_BOOKE
785 	{MSR_CE,	"CE"},
786 #endif
787 	{MSR_EE,	"EE"},
788 	{MSR_PR,	"PR"},
789 	{MSR_FP,	"FP"},
790 	{MSR_ME,	"ME"},
791 #ifdef CONFIG_BOOKE
792 	{MSR_DE,	"DE"},
793 #else
794 	{MSR_SE,	"SE"},
795 	{MSR_BE,	"BE"},
796 #endif
797 	{MSR_IR,	"IR"},
798 	{MSR_DR,	"DR"},
799 	{MSR_PMM,	"PMM"},
800 #ifndef CONFIG_BOOKE
801 	{MSR_RI,	"RI"},
802 	{MSR_LE,	"LE"},
803 #endif
804 	{0,		NULL}
805 };
806 
807 static void printbits(unsigned long val, struct regbit *bits)
808 {
809 	const char *sep = "";
810 
811 	printk("<");
812 	for (; bits->bit; ++bits)
813 		if (val & bits->bit) {
814 			printk("%s%s", sep, bits->name);
815 			sep = ",";
816 		}
817 	printk(">");
818 }
819 
820 #ifdef CONFIG_PPC64
821 #define REG		"%016lx"
822 #define REGS_PER_LINE	4
823 #define LAST_VOLATILE	13
824 #else
825 #define REG		"%08lx"
826 #define REGS_PER_LINE	8
827 #define LAST_VOLATILE	12
828 #endif
829 
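/*
 * Editor's note: a small usage sketch (not part of this file) for the
 * printbits()/REG machinery above.  A typical 64-bit kernel MSR value
 * with SF, HV, EE, FP, ME, IR, DR and RI set would be rendered as
 * "<SF,HV,EE,FP,ME,IR,DR,RI>", matching the MSR line show_regs()
 * prints below.
 */
#if 0	/* illustrative sketch, not built */
static void show_msr(unsigned long msr)
{
	printk("MSR: "REG" ", msr);
	printbits(msr, msr_bits);
	printk("\n");
}
#endif
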
830 void show_regs(struct pt_regs * regs)
831 {
832 	int i, trap;
833 
834 	show_regs_print_info(KERN_DEFAULT);
835 
836 	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
837 	       regs->nip, regs->link, regs->ctr);
838 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
839 	       regs, regs->trap, print_tainted(), init_utsname()->release);
840 	printk("MSR: "REG" ", regs->msr);
841 	printbits(regs->msr, msr_bits);
842 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
843 #ifdef CONFIG_PPC64
844 	printk("SOFTE: %ld\n", regs->softe);
845 #endif
846 	trap = TRAP(regs);
847 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
848 		printk("CFAR: "REG"\n", regs->orig_gpr3);
849 	if (trap == 0x300 || trap == 0x600)
850 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
851 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
852 #else
853 		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
854 #endif
855 
856 	for (i = 0;  i < 32;  i++) {
857 		if ((i % REGS_PER_LINE) == 0)
858 			printk("\nGPR%02d: ", i);
859 		printk(REG " ", regs->gpr[i]);
860 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
861 			break;
862 	}
863 	printk("\n");
864 #ifdef CONFIG_KALLSYMS
865 	/*
866 	 * Lookup NIP late so we have the best chance of getting the
867 	 * above info out without failing
868 	 */
869 	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
870 	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
871 #endif
872 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
873 	printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
874 #endif
875 	show_stack(current, (unsigned long *) regs->gpr[1]);
876 	if (!user_mode(regs))
877 		show_instructions(regs);
878 }
879 
880 void exit_thread(void)
881 {
882 	discard_lazy_cpu_state();
883 }
884 
885 void flush_thread(void)
886 {
887 	discard_lazy_cpu_state();
888 
889 #ifdef CONFIG_HAVE_HW_BREAKPOINT
890 	flush_ptrace_hw_breakpoint(current);
891 #else /* CONFIG_HAVE_HW_BREAKPOINT */
892 	set_debug_reg_defaults(&current->thread);
893 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
894 }
895 
896 void
897 release_thread(struct task_struct *t)
898 {
899 }
900 
901 /*
902  * This gets called so that we can store coprocessor state into memory and
903  * copy the current task into the new thread.
904  */
905 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
906 {
907 	flush_fp_to_thread(src);
908 	flush_altivec_to_thread(src);
909 	flush_vsx_to_thread(src);
910 	flush_spe_to_thread(src);
911 	*dst = *src;
912 	return 0;
913 }
914 
915 /*
916  * Copy a thread.
917  */
918 extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
919 
920 int copy_thread(unsigned long clone_flags, unsigned long usp,
921 		unsigned long arg, struct task_struct *p)
922 {
923 	struct pt_regs *childregs, *kregs;
924 	extern void ret_from_fork(void);
925 	extern void ret_from_kernel_thread(void);
926 	void (*f)(void);
927 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
928 
929 	/* Copy registers */
930 	sp -= sizeof(struct pt_regs);
931 	childregs = (struct pt_regs *) sp;
932 	if (unlikely(p->flags & PF_KTHREAD)) {
933 		struct thread_info *ti = (void *)task_stack_page(p);
934 		memset(childregs, 0, sizeof(struct pt_regs));
935 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
936 		childregs->gpr[14] = usp;	/* function */
937 #ifdef CONFIG_PPC64
938 		clear_tsk_thread_flag(p, TIF_32BIT);
939 		childregs->softe = 1;
940 #endif
941 		childregs->gpr[15] = arg;
942 		p->thread.regs = NULL;	/* no user register state */
943 		ti->flags |= _TIF_RESTOREALL;
944 		f = ret_from_kernel_thread;
945 	} else {
946 		struct pt_regs *regs = current_pt_regs();
947 		CHECK_FULL_REGS(regs);
948 		*childregs = *regs;
949 		if (usp)
950 			childregs->gpr[1] = usp;
951 		p->thread.regs = childregs;
952 		childregs->gpr[3] = 0;  /* Result from fork() */
953 		if (clone_flags & CLONE_SETTLS) {
954 #ifdef CONFIG_PPC64
955 			if (!is_32bit_task())
956 				childregs->gpr[13] = childregs->gpr[6];
957 			else
958 #endif
959 				childregs->gpr[2] = childregs->gpr[6];
960 		}
961 
962 		f = ret_from_fork;
963 	}
964 	sp -= STACK_FRAME_OVERHEAD;
965 
966 	/*
967 	 * The way this works is that at some point in the future
968 	 * some task will call _switch to switch to the new task.
969 	 * That will pop off the stack frame created below and start
970 	 * the new task running at ret_from_fork.  The new task will
971 	 * do some housekeeping and then return from the fork or clone
972 	 * system call, using the stack frame created above.
973 	 */
974 	sp -= sizeof(struct pt_regs);
975 	kregs = (struct pt_regs *) sp;
976 	sp -= STACK_FRAME_OVERHEAD;
977 	p->thread.ksp = sp;
978 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
979 				_ALIGN_UP(sizeof(struct thread_info), 16);
980 
981 #ifdef CONFIG_HAVE_HW_BREAKPOINT
982 	p->thread.ptrace_bps[0] = NULL;
983 #endif
984 
985 #ifdef CONFIG_PPC_STD_MMU_64
986 	if (mmu_has_feature(MMU_FTR_SLB)) {
987 		unsigned long sp_vsid;
988 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
989 
990 		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
991 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
992 				<< SLB_VSID_SHIFT_1T;
993 		else
994 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
995 				<< SLB_VSID_SHIFT;
996 		sp_vsid |= SLB_VSID_KERNEL | llp;
997 		p->thread.ksp_vsid = sp_vsid;
998 	}
999 #endif /* CONFIG_PPC_STD_MMU_64 */
1000 #ifdef CONFIG_PPC64
1001 	if (cpu_has_feature(CPU_FTR_DSCR)) {
1002 		p->thread.dscr_inherit = current->thread.dscr_inherit;
1003 		p->thread.dscr = current->thread.dscr;
1004 	}
1005 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
1006 		p->thread.ppr = INIT_PPR;
1007 #endif
1008 	/*
1009 	 * The PPC64 ABI makes use of a TOC to contain function
1010 	 * pointers.  The function symbol (here, f) actually refers to the
1011 	 * function descriptor.  The first entry in the descriptor is a
1012 	 * pointer to the actual function.
1013 	 */
1014 #ifdef CONFIG_PPC64
1015 	kregs->nip = *((unsigned long *)f);
1016 #else
1017 	kregs->nip = (unsigned long)f;
1018 #endif
1019 	return 0;
1020 }
1021 
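/*
 * Editor's note: a sketch (not part of this file) of the child kernel
 * stack that copy_thread() lays out above, expressed as the offset
 * arithmetic from the top of the stack page.  Purely illustrative.
 */
#if 0	/* illustrative sketch, not built */
static unsigned long child_ksp(struct task_struct *p)
{
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	sp -= sizeof(struct pt_regs);	/* childregs: user register frame */
	sp -= STACK_FRAME_OVERHEAD;	/* frame ret_from_fork runs on */
	sp -= sizeof(struct pt_regs);	/* kregs: frame popped by _switch() */
	sp -= STACK_FRAME_OVERHEAD;	/* back chain for _switch() */
	return sp;			/* == p->thread.ksp */
}
#endif
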
1022 /*
1023  * Set up a thread for executing a new program
1024  */
1025 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1026 {
1027 #ifdef CONFIG_PPC64
1028 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
1029 #endif
1030 
1031 	/*
1032 	 * If we exec out of a kernel thread then thread.regs will not be
1033 	 * set.  Do it now.
1034 	 */
1035 	if (!current->thread.regs) {
1036 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1037 		current->thread.regs = regs - 1;
1038 	}
1039 
1040 	memset(regs->gpr, 0, sizeof(regs->gpr));
1041 	regs->ctr = 0;
1042 	regs->link = 0;
1043 	regs->xer = 0;
1044 	regs->ccr = 0;
1045 	regs->gpr[1] = sp;
1046 
1047 	/*
1048 	 * We have just cleared all the nonvolatile GPRs, so make
1049 	 * FULL_REGS(regs) return true.  This is necessary to allow
1050 	 * ptrace to examine the thread immediately after exec.
1051 	 */
1052 	regs->trap &= ~1UL;
1053 
1054 #ifdef CONFIG_PPC32
1055 	regs->mq = 0;
1056 	regs->nip = start;
1057 	regs->msr = MSR_USER;
1058 #else
1059 	if (!is_32bit_task()) {
1060 		unsigned long entry, toc;
1061 
1062 		/* start is a relocated pointer to the function descriptor for
1063 		 * the ELF _start routine.  The first entry in the function
1064 		 * descriptor is the entry address of _start and the second
1065 		 * entry is the TOC value we need to use.
1066 		 */
1067 		__get_user(entry, (unsigned long __user *)start);
1068 		__get_user(toc, (unsigned long __user *)start+1);
1069 
1070 		/* Check whether the e_entry function descriptor entries
1071 		 * need to be relocated before we can use them.
1072 		 */
1073 		if (load_addr != 0) {
1074 			entry += load_addr;
1075 			toc   += load_addr;
1076 		}
1077 		regs->nip = entry;
1078 		regs->gpr[2] = toc;
1079 		regs->msr = MSR_USER64;
1080 	} else {
1081 		regs->nip = start;
1082 		regs->gpr[2] = 0;
1083 		regs->msr = MSR_USER32;
1084 	}
1085 #endif
1086 	discard_lazy_cpu_state();
1087 #ifdef CONFIG_VSX
1088 	current->thread.used_vsr = 0;
1089 #endif
1090 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
1091 	current->thread.fpscr.val = 0;
1092 #ifdef CONFIG_ALTIVEC
1093 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
1094 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
1095 	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
1096 	current->thread.vrsave = 0;
1097 	current->thread.used_vr = 0;
1098 #endif /* CONFIG_ALTIVEC */
1099 #ifdef CONFIG_SPE
1100 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
1101 	current->thread.acc = 0;
1102 	current->thread.spefscr = 0;
1103 	current->thread.used_spe = 0;
1104 #endif /* CONFIG_SPE */
1105 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1106 	if (cpu_has_feature(CPU_FTR_TM))
1107 		regs->msr |= MSR_TM;
1108 	current->thread.tm_tfhar = 0;
1109 	current->thread.tm_texasr = 0;
1110 	current->thread.tm_tfiar = 0;
1111 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1112 }
1113 
1114 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1115 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
1116 
1117 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1118 {
1119 	struct pt_regs *regs = tsk->thread.regs;
1120 
1121 	/* This is a bit hairy.  If we are an SPE-enabled processor
1122 	 * (have embedded fp) we store the IEEE exception enable flags in
1123 	 * fpexc_mode.  fpexc_mode is also used for setting the FP exception
1124 	 * mode (async, precise, disabled) for 'Classic' FP. */
1125 	if (val & PR_FP_EXC_SW_ENABLE) {
1126 #ifdef CONFIG_SPE
1127 		if (cpu_has_feature(CPU_FTR_SPE)) {
1128 			tsk->thread.fpexc_mode = val &
1129 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1130 			return 0;
1131 		} else {
1132 			return -EINVAL;
1133 		}
1134 #else
1135 		return -EINVAL;
1136 #endif
1137 	}
1138 
1139 	/* On a CONFIG_SPE build this does not hurt us.  The bits that
1140 	 * __pack_fe01 uses do not overlap with the bits used for
1141 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
1142 	 * on CONFIG_SPE implementations are reserved, so writing to
1143 	 * them does not change anything. */
1144 	if (val > PR_FP_EXC_PRECISE)
1145 		return -EINVAL;
1146 	tsk->thread.fpexc_mode = __pack_fe01(val);
1147 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
1148 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1149 			| tsk->thread.fpexc_mode;
1150 	return 0;
1151 }
1152 
1153 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1154 {
1155 	unsigned int val;
1156 
1157 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1158 #ifdef CONFIG_SPE
1159 		if (cpu_has_feature(CPU_FTR_SPE))
1160 			val = tsk->thread.fpexc_mode;
1161 		else
1162 			return -EINVAL;
1163 #else
1164 		return -EINVAL;
1165 #endif
1166 	else
1167 		val = __unpack_fe01(tsk->thread.fpexc_mode);
1168 	return put_user(val, (unsigned int __user *) adr);
1169 }
1170 
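/*
 * Editor's note: a userspace sketch (not part of this file) of the
 * prctl() interface that set_fpexc_mode()/get_fpexc_mode() back.
 * PR_SET_FPEXC, PR_GET_FPEXC and PR_FP_EXC_PRECISE are the standard
 * <sys/prctl.h> definitions; build separately.
 */
#if 0	/* illustrative sketch, not built */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int mode;

	/* Request precise (synchronous) FP exception reporting. */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) != 0)
		perror("PR_SET_FPEXC");
	if (prctl(PR_GET_FPEXC, (unsigned long)&mode) == 0)
		printf("fpexc mode: %#x\n", mode);
	return 0;
}
#endif
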
1171 int set_endian(struct task_struct *tsk, unsigned int val)
1172 {
1173 	struct pt_regs *regs = tsk->thread.regs;
1174 
1175 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1176 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1177 		return -EINVAL;
1178 
1179 	if (regs == NULL)
1180 		return -EINVAL;
1181 
1182 	if (val == PR_ENDIAN_BIG)
1183 		regs->msr &= ~MSR_LE;
1184 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1185 		regs->msr |= MSR_LE;
1186 	else
1187 		return -EINVAL;
1188 
1189 	return 0;
1190 }
1191 
1192 int get_endian(struct task_struct *tsk, unsigned long adr)
1193 {
1194 	struct pt_regs *regs = tsk->thread.regs;
1195 	unsigned int val;
1196 
1197 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1198 	    !cpu_has_feature(CPU_FTR_REAL_LE))
1199 		return -EINVAL;
1200 
1201 	if (regs == NULL)
1202 		return -EINVAL;
1203 
1204 	if (regs->msr & MSR_LE) {
1205 		if (cpu_has_feature(CPU_FTR_REAL_LE))
1206 			val = PR_ENDIAN_LITTLE;
1207 		else
1208 			val = PR_ENDIAN_PPC_LITTLE;
1209 	} else
1210 		val = PR_ENDIAN_BIG;
1211 
1212 	return put_user(val, (unsigned int __user *)adr);
1213 }
1214 
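/*
 * Editor's note: a companion userspace sketch (not part of this file)
 * for the endianness prctl()s above.  PR_GET_ENDIAN writes one of the
 * PR_ENDIAN_* values through the supplied pointer; PR_SET_ENDIAN
 * fails with EINVAL unless the CPU has a little-endian mode.
 */
#if 0	/* illustrative sketch, not built */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int val;

	if (prctl(PR_GET_ENDIAN, (unsigned long)&val) == 0)
		printf("endian mode: %u\n", val);	/* PR_ENDIAN_* */
	return 0;
}
#endif
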
1215 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1216 {
1217 	tsk->thread.align_ctl = val;
1218 	return 0;
1219 }
1220 
1221 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1222 {
1223 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1224 }
1225 
1226 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1227 				  unsigned long nbytes)
1228 {
1229 	unsigned long stack_page;
1230 	unsigned long cpu = task_cpu(p);
1231 
1232 	/*
1233 	 * Avoid crashing if the stack has overflowed and corrupted
1234 	 * task_cpu(p), which is in the thread_info struct.
1235 	 */
1236 	if (cpu < NR_CPUS && cpu_possible(cpu)) {
1237 		stack_page = (unsigned long) hardirq_ctx[cpu];
1238 		if (sp >= stack_page + sizeof(struct thread_struct)
1239 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1240 			return 1;
1241 
1242 		stack_page = (unsigned long) softirq_ctx[cpu];
1243 		if (sp >= stack_page + sizeof(struct thread_struct)
1244 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1245 			return 1;
1246 	}
1247 	return 0;
1248 }
1249 
1250 int validate_sp(unsigned long sp, struct task_struct *p,
1251 		       unsigned long nbytes)
1252 {
1253 	unsigned long stack_page = (unsigned long)task_stack_page(p);
1254 
1255 	if (sp >= stack_page + sizeof(struct thread_struct)
1256 	    && sp <= stack_page + THREAD_SIZE - nbytes)
1257 		return 1;
1258 
1259 	return valid_irq_stack(sp, p, nbytes);
1260 }
1261 
1262 EXPORT_SYMBOL(validate_sp);
1263 
1264 unsigned long get_wchan(struct task_struct *p)
1265 {
1266 	unsigned long ip, sp;
1267 	int count = 0;
1268 
1269 	if (!p || p == current || p->state == TASK_RUNNING)
1270 		return 0;
1271 
1272 	sp = p->thread.ksp;
1273 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1274 		return 0;
1275 
1276 	do {
1277 		sp = *(unsigned long *)sp;
1278 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1279 			return 0;
1280 		if (count > 0) {
1281 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1282 			if (!in_sched_functions(ip))
1283 				return ip;
1284 		}
1285 	} while (count++ < 16);
1286 	return 0;
1287 }
1288 
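/*
 * Editor's note: a userspace analogue (not part of this file) of the
 * back-chain walk in get_wchan().  The PowerPC ABI stores the previous
 * stack pointer at offset 0 of every frame, which is exactly what
 * "sp = *(unsigned long *)sp" follows above.  Purely illustrative.
 */
#if 0	/* illustrative sketch, not built */
#include <stdio.h>

static void walk_own_stack(void)
{
	unsigned long sp = (unsigned long)__builtin_frame_address(0);
	int depth = 0;

	while (sp && depth++ < 16) {
		printf("frame %d at %#lx\n", depth, sp);
		sp = *(unsigned long *)sp;	/* follow the back chain */
	}
}
#endif
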
1289 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1290 
1291 void show_stack(struct task_struct *tsk, unsigned long *stack)
1292 {
1293 	unsigned long sp, ip, lr, newsp;
1294 	int count = 0;
1295 	int firstframe = 1;
1296 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1297 	int curr_frame = current->curr_ret_stack;
1298 	extern void return_to_handler(void);
1299 	unsigned long rth = (unsigned long)return_to_handler;
1300 	unsigned long mrth = -1;
1301 #ifdef CONFIG_PPC64
1302 	extern void mod_return_to_handler(void);
1303 	rth = *(unsigned long *)rth;
1304 	mrth = (unsigned long)mod_return_to_handler;
1305 	mrth = *(unsigned long *)mrth;
1306 #endif
1307 #endif
1308 
1309 	sp = (unsigned long) stack;
1310 	if (tsk == NULL)
1311 		tsk = current;
1312 	if (sp == 0) {
1313 		if (tsk == current)
1314 			asm("mr %0,1" : "=r" (sp));
1315 		else
1316 			sp = tsk->thread.ksp;
1317 	}
1318 
1319 	lr = 0;
1320 	printk("Call Trace:\n");
1321 	do {
1322 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1323 			return;
1324 
1325 		stack = (unsigned long *) sp;
1326 		newsp = stack[0];
1327 		ip = stack[STACK_FRAME_LR_SAVE];
1328 		if (!firstframe || ip != lr) {
1329 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1330 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1331 			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
1332 				printk(" (%pS)",
1333 				       (void *)current->ret_stack[curr_frame].ret);
1334 				curr_frame--;
1335 			}
1336 #endif
1337 			if (firstframe)
1338 				printk(" (unreliable)");
1339 			printk("\n");
1340 		}
1341 		firstframe = 0;
1342 
1343 		/*
1344 		 * See if this is an exception frame.
1345 		 * We look for the "regshere" marker in the current frame.
1346 		 */
1347 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1348 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1349 			struct pt_regs *regs = (struct pt_regs *)
1350 				(sp + STACK_FRAME_OVERHEAD);
1351 			lr = regs->link;
1352 			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
1353 			       regs->trap, (void *)regs->nip, (void *)lr);
1354 			firstframe = 1;
1355 		}
1356 
1357 		sp = newsp;
1358 	} while (count++ < kstack_depth_to_print);
1359 }
1360 
1361 #ifdef CONFIG_PPC64
1362 /* Called with hard IRQs off */
1363 void __ppc64_runlatch_on(void)
1364 {
1365 	struct thread_info *ti = current_thread_info();
1366 	unsigned long ctrl;
1367 
1368 	ctrl = mfspr(SPRN_CTRLF);
1369 	ctrl |= CTRL_RUNLATCH;
1370 	mtspr(SPRN_CTRLT, ctrl);
1371 
1372 	ti->local_flags |= _TLF_RUNLATCH;
1373 }
1374 
1375 /* Called with hard IRQs off */
1376 void __ppc64_runlatch_off(void)
1377 {
1378 	struct thread_info *ti = current_thread_info();
1379 	unsigned long ctrl;
1380 
1381 	ti->local_flags &= ~_TLF_RUNLATCH;
1382 
1383 	ctrl = mfspr(SPRN_CTRLF);
1384 	ctrl &= ~CTRL_RUNLATCH;
1385 	mtspr(SPRN_CTRLT, ctrl);
1386 }
1387 #endif /* CONFIG_PPC64 */
1388 
1389 unsigned long arch_align_stack(unsigned long sp)
1390 {
1391 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1392 		sp -= get_random_int() & ~PAGE_MASK;
1393 	return sp & ~0xf;
1394 }
1395 
1396 static inline unsigned long brk_rnd(void)
1397 {
1398 	unsigned long rnd = 0;
1399 
1400 	/* 8MB for 32bit, 1GB for 64bit */
1401 	if (is_32bit_task())
1402 		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1403 	else
1404 		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1405 
1406 	return rnd << PAGE_SHIFT;
1407 }
1408 
1409 unsigned long arch_randomize_brk(struct mm_struct *mm)
1410 {
1411 	unsigned long base = mm->brk;
1412 	unsigned long ret;
1413 
1414 #ifdef CONFIG_PPC_STD_MMU_64
1415 	/*
1416 	 * If we are using 1TB segments and we are allowed to randomise
1417 	 * the heap, we can put it above 1TB so it is backed by a 1TB
1418 	 * segment. Otherwise the heap will be in the bottom 1TB
1419 	 * which always uses 256MB segments and this may result in a
1420 	 * performance penalty.
1421 	 */
1422 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1423 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1424 #endif
1425 
1426 	ret = PAGE_ALIGN(base + brk_rnd());
1427 
1428 	if (ret < mm->brk)
1429 		return mm->brk;
1430 
1431 	return ret;
1432 }
1433 
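/*
 * Editor's note: a worked example (not part of this file) of the
 * brk_rnd() ranges above, assuming PAGE_SHIFT = 12 (4K pages).
 * 32-bit: rnd < 1 << (23 - 12) = 2048 pages, i.e. up to 8MB of heap
 * slide; 64-bit: rnd < 1 << (30 - 12) = 256K pages, i.e. up to 1GB.
 */
#if 0	/* illustrative sketch, not built */
static unsigned long max_brk_slide(int is_32bit, int page_shift)
{
	unsigned long pages = 1UL << ((is_32bit ? 23 : 30) - page_shift);

	return pages << page_shift;	/* 8MB for 32-bit, 1GB for 64-bit */
}
#endif
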
1434 unsigned long randomize_et_dyn(unsigned long base)
1435 {
1436 	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1437 
1438 	if (ret < base)
1439 		return base;
1440 
1441 	return ret;
1442 }
1443