xref: /openbmc/linux/arch/powerpc/kernel/process.c (revision e5f586c763a079349398e2b0c7c271386193ac34)
1 /*
2  *  Derived from "arch/i386/kernel/process.c"
3  *    Copyright (C) 1995  Linus Torvalds
4  *
5  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6  *  Paul Mackerras (paulus@cs.anu.edu.au)
7  *
8  *  PowerPC version
9  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License
13  *  as published by the Free Software Foundation; either version
14  *  2 of the License, or (at your option) any later version.
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/stddef.h>
26 #include <linux/unistd.h>
27 #include <linux/ptrace.h>
28 #include <linux/slab.h>
29 #include <linux/user.h>
30 #include <linux/elf.h>
31 #include <linux/prctl.h>
32 #include <linux/init_task.h>
33 #include <linux/export.h>
34 #include <linux/kallsyms.h>
35 #include <linux/mqueue.h>
36 #include <linux/hardirq.h>
37 #include <linux/utsname.h>
38 #include <linux/ftrace.h>
39 #include <linux/kernel_stat.h>
40 #include <linux/personality.h>
41 #include <linux/random.h>
42 #include <linux/hw_breakpoint.h>
43 #include <linux/uaccess.h>
44 #include <linux/elf-randomize.h>
45 
46 #include <asm/pgtable.h>
47 #include <asm/io.h>
48 #include <asm/processor.h>
49 #include <asm/mmu.h>
50 #include <asm/prom.h>
51 #include <asm/machdep.h>
52 #include <asm/time.h>
53 #include <asm/runlatch.h>
54 #include <asm/syscalls.h>
55 #include <asm/switch_to.h>
56 #include <asm/tm.h>
57 #include <asm/debug.h>
58 #ifdef CONFIG_PPC64
59 #include <asm/firmware.h>
60 #endif
61 #include <asm/code-patching.h>
62 #include <asm/exec.h>
63 #include <asm/livepatch.h>
64 #include <asm/cpu_has_feature.h>
65 #include <asm/asm-prototypes.h>
66 
67 #include <linux/kprobes.h>
68 #include <linux/kdebug.h>
69 
70 /* Transactional Memory debug */
71 #ifdef TM_DEBUG_SW
72 #define TM_DEBUG(x...) printk(KERN_INFO x)
73 #else
74 #define TM_DEBUG(x...) do { } while (0)
75 #endif
76 
77 extern unsigned long _get_SP(void);
78 
79 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
80 static void check_if_tm_restore_required(struct task_struct *tsk)
81 {
82 	/*
83 	 * If we are saving the current thread's registers, and the
84 	 * thread is in a transactional state, set the TIF_RESTORE_TM
85 	 * bit so that we know to restore the registers before
86 	 * returning to userspace.
87 	 */
88 	if (tsk == current && tsk->thread.regs &&
89 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
90 	    !test_thread_flag(TIF_RESTORE_TM)) {
91 		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
92 		set_thread_flag(TIF_RESTORE_TM);
93 	}
94 }
95 
96 static inline bool msr_tm_active(unsigned long msr)
97 {
98 	return MSR_TM_ACTIVE(msr);
99 }
100 #else
101 static inline bool msr_tm_active(unsigned long msr) { return false; }
102 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
103 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
104 
105 bool strict_msr_control;
106 EXPORT_SYMBOL(strict_msr_control);
107 
108 static int __init enable_strict_msr_control(char *str)
109 {
110 	strict_msr_control = true;
111 	pr_info("Enabling strict facility control\n");
112 
113 	return 0;
114 }
115 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
116 
117 unsigned long msr_check_and_set(unsigned long bits)
118 {
119 	unsigned long oldmsr = mfmsr();
120 	unsigned long newmsr;
121 
122 	newmsr = oldmsr | bits;
123 
124 #ifdef CONFIG_VSX
125 	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
126 		newmsr |= MSR_VSX;
127 #endif
128 
129 	if (oldmsr != newmsr)
130 		mtmsr_isync(newmsr);
131 
132 	return newmsr;
133 }
134 
135 void __msr_check_and_clear(unsigned long bits)
136 {
137 	unsigned long oldmsr = mfmsr();
138 	unsigned long newmsr;
139 
140 	newmsr = oldmsr & ~bits;
141 
142 #ifdef CONFIG_VSX
143 	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
144 		newmsr &= ~MSR_VSX;
145 #endif
146 
147 	if (oldmsr != newmsr)
148 		mtmsr_isync(newmsr);
149 }
150 EXPORT_SYMBOL(__msr_check_and_clear);
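
/*
 * Illustrative sketch (not part of the original file): the intended
 * pattern for temporarily enabling a facility for kernel use is
 *
 *	msr_check_and_set(MSR_FP);
 *	... use the facility from kernel context ...
 *	msr_check_and_clear(MSR_FP);
 *
 * With ppc_strict_facility_enable set, the clear takes effect eagerly;
 * otherwise facility bits may be left enabled lazily.
 */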
151 
152 #ifdef CONFIG_PPC_FPU
153 void __giveup_fpu(struct task_struct *tsk)
154 {
155 	unsigned long msr;
156 
157 	save_fpu(tsk);
158 	msr = tsk->thread.regs->msr;
159 	msr &= ~MSR_FP;
160 #ifdef CONFIG_VSX
161 	if (cpu_has_feature(CPU_FTR_VSX))
162 		msr &= ~MSR_VSX;
163 #endif
164 	tsk->thread.regs->msr = msr;
165 }
166 
167 void giveup_fpu(struct task_struct *tsk)
168 {
169 	check_if_tm_restore_required(tsk);
170 
171 	msr_check_and_set(MSR_FP);
172 	__giveup_fpu(tsk);
173 	msr_check_and_clear(MSR_FP);
174 }
175 EXPORT_SYMBOL(giveup_fpu);
176 
177 /*
178  * Make sure the floating-point register state in the
179  * thread_struct is up to date for task tsk.
180  */
181 void flush_fp_to_thread(struct task_struct *tsk)
182 {
183 	if (tsk->thread.regs) {
184 		/*
185 		 * We need to disable preemption here because if we didn't,
186 		 * another process could get scheduled after the regs->msr
187 		 * test but before we have finished saving the FP registers
188 		 * to the thread_struct.  That process could take over the
189 		 * FPU, and then when we get scheduled again we would store
190 		 * bogus values for the remaining FP registers.
191 		 */
192 		preempt_disable();
193 		if (tsk->thread.regs->msr & MSR_FP) {
194 			/*
195 			 * This should only ever be called for current or
196 			 * for a stopped child process.  Since we save away
197 			 * the FP register state on context switch,
198 			 * there is something wrong if a stopped child appears
199 			 * to still have its FP state in the CPU registers.
200 			 */
201 			BUG_ON(tsk != current);
202 			giveup_fpu(tsk);
203 		}
204 		preempt_enable();
205 	}
206 }
207 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
208 
209 void enable_kernel_fp(void)
210 {
211 	unsigned long cpumsr;
212 
213 	WARN_ON(preemptible());
214 
215 	cpumsr = msr_check_and_set(MSR_FP);
216 
217 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
218 		check_if_tm_restore_required(current);
219 		/*
220 		 * If a thread has already been reclaimed then the
221 		 * checkpointed registers are on the CPU but have definitely
222 		 * been saved by the reclaim code. We don't need to, and *cannot*,
223 		 * give up, as that would save to the 'live' structure, not the
224 		 * checkpointed structure.
225 		 */
226 		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
227 			return;
228 		__giveup_fpu(current);
229 	}
230 }
231 EXPORT_SYMBOL(enable_kernel_fp);
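
/*
 * Hedged usage sketch (illustrative): kernel code wanting the FP unit
 * typically brackets its use in a non-preemptible region, e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... FP instructions ...
 *	disable_kernel_fp();
 *	preempt_enable();
 *
 * where disable_kernel_fp() (asm/switch_to.h) wraps
 * msr_check_and_clear(MSR_FP).
 */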
232 
233 static int restore_fp(struct task_struct *tsk) {
234 	if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
235 		load_fp_state(&current->thread.fp_state);
236 		current->thread.load_fp++;
237 		return 1;
238 	}
239 	return 0;
240 }
241 #else
242 static int restore_fp(struct task_struct *tsk) { return 0; }
243 #endif /* CONFIG_PPC_FPU */
244 
245 #ifdef CONFIG_ALTIVEC
246 #define loadvec(thr) ((thr).load_vec)
247 
248 static void __giveup_altivec(struct task_struct *tsk)
249 {
250 	unsigned long msr;
251 
252 	save_altivec(tsk);
253 	msr = tsk->thread.regs->msr;
254 	msr &= ~MSR_VEC;
255 #ifdef CONFIG_VSX
256 	if (cpu_has_feature(CPU_FTR_VSX))
257 		msr &= ~MSR_VSX;
258 #endif
259 	tsk->thread.regs->msr = msr;
260 }
261 
262 void giveup_altivec(struct task_struct *tsk)
263 {
264 	check_if_tm_restore_required(tsk);
265 
266 	msr_check_and_set(MSR_VEC);
267 	__giveup_altivec(tsk);
268 	msr_check_and_clear(MSR_VEC);
269 }
270 EXPORT_SYMBOL(giveup_altivec);
271 
272 void enable_kernel_altivec(void)
273 {
274 	unsigned long cpumsr;
275 
276 	WARN_ON(preemptible());
277 
278 	cpumsr = msr_check_and_set(MSR_VEC);
279 
280 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
281 		check_if_tm_restore_required(current);
282 		/*
283 		 * If a thread has already been reclaimed then the
284 		 * checkpointed registers are on the CPU but have definitely
285 		 * been saved by the reclaim code. We don't need to, and *cannot*,
286 		 * give up, as that would save to the 'live' structure, not the
287 		 * checkpointed structure.
288 		 */
289 		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
290 			return;
291 		__giveup_altivec(current);
292 	}
293 }
294 EXPORT_SYMBOL(enable_kernel_altivec);
295 
296 /*
297  * Make sure the VMX/Altivec register state in the
298  * thread_struct is up to date for task tsk.
299  */
300 void flush_altivec_to_thread(struct task_struct *tsk)
301 {
302 	if (tsk->thread.regs) {
303 		preempt_disable();
304 		if (tsk->thread.regs->msr & MSR_VEC) {
305 			BUG_ON(tsk != current);
306 			giveup_altivec(tsk);
307 		}
308 		preempt_enable();
309 	}
310 }
311 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
312 
313 static int restore_altivec(struct task_struct *tsk)
314 {
315 	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
316 		(tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
317 		load_vr_state(&tsk->thread.vr_state);
318 		tsk->thread.used_vr = 1;
319 		tsk->thread.load_vec++;
320 
321 		return 1;
322 	}
323 	return 0;
324 }
325 #else
326 #define loadvec(thr) 0
327 static inline int restore_altivec(struct task_struct *tsk) { return 0; }
328 #endif /* CONFIG_ALTIVEC */
329 
330 #ifdef CONFIG_VSX
331 static void __giveup_vsx(struct task_struct *tsk)
332 {
333 	if (tsk->thread.regs->msr & MSR_FP)
334 		__giveup_fpu(tsk);
335 	if (tsk->thread.regs->msr & MSR_VEC)
336 		__giveup_altivec(tsk);
337 	tsk->thread.regs->msr &= ~MSR_VSX;
338 }
339 
340 static void giveup_vsx(struct task_struct *tsk)
341 {
342 	check_if_tm_restore_required(tsk);
343 
344 	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
345 	__giveup_vsx(tsk);
346 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
347 }
348 
349 static void save_vsx(struct task_struct *tsk)
350 {
351 	if (tsk->thread.regs->msr & MSR_FP)
352 		save_fpu(tsk);
353 	if (tsk->thread.regs->msr & MSR_VEC)
354 		save_altivec(tsk);
355 }
356 
357 void enable_kernel_vsx(void)
358 {
359 	unsigned long cpumsr;
360 
361 	WARN_ON(preemptible());
362 
363 	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
364 
365 	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
366 		check_if_tm_restore_required(current);
367 		/*
368 		 * If a thread has already been reclaimed then the
369 		 * checkpointed registers are on the CPU but have definitely
370 		 * been saved by the reclaim code. We don't need to, and *cannot*,
371 		 * give up, as that would save to the 'live' structure, not the
372 		 * checkpointed structure.
373 		 */
374 		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
375 			return;
376 		if (current->thread.regs->msr & MSR_FP)
377 			__giveup_fpu(current);
378 		if (current->thread.regs->msr & MSR_VEC)
379 			__giveup_altivec(current);
380 		__giveup_vsx(current);
381 	}
382 }
383 EXPORT_SYMBOL(enable_kernel_vsx);
384 
385 void flush_vsx_to_thread(struct task_struct *tsk)
386 {
387 	if (tsk->thread.regs) {
388 		preempt_disable();
389 		if (tsk->thread.regs->msr & MSR_VSX) {
390 			BUG_ON(tsk != current);
391 			giveup_vsx(tsk);
392 		}
393 		preempt_enable();
394 	}
395 }
396 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
397 
398 static int restore_vsx(struct task_struct *tsk)
399 {
400 	if (cpu_has_feature(CPU_FTR_VSX)) {
401 		tsk->thread.used_vsr = 1;
402 		return 1;
403 	}
404 
405 	return 0;
406 }
407 #else
408 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
409 static inline void save_vsx(struct task_struct *tsk) { }
410 #endif /* CONFIG_VSX */
411 
412 #ifdef CONFIG_SPE
413 void giveup_spe(struct task_struct *tsk)
414 {
415 	check_if_tm_restore_required(tsk);
416 
417 	msr_check_and_set(MSR_SPE);
418 	__giveup_spe(tsk);
419 	msr_check_and_clear(MSR_SPE);
420 }
421 EXPORT_SYMBOL(giveup_spe);
422 
423 void enable_kernel_spe(void)
424 {
425 	WARN_ON(preemptible());
426 
427 	msr_check_and_set(MSR_SPE);
428 
429 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
430 		check_if_tm_restore_required(current);
431 		__giveup_spe(current);
432 	}
433 }
434 EXPORT_SYMBOL(enable_kernel_spe);
435 
436 void flush_spe_to_thread(struct task_struct *tsk)
437 {
438 	if (tsk->thread.regs) {
439 		preempt_disable();
440 		if (tsk->thread.regs->msr & MSR_SPE) {
441 			BUG_ON(tsk != current);
442 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
443 			giveup_spe(tsk);
444 		}
445 		preempt_enable();
446 	}
447 }
448 #endif /* CONFIG_SPE */
449 
450 static unsigned long msr_all_available;
451 
452 static int __init init_msr_all_available(void)
453 {
454 #ifdef CONFIG_PPC_FPU
455 	msr_all_available |= MSR_FP;
456 #endif
457 #ifdef CONFIG_ALTIVEC
458 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
459 		msr_all_available |= MSR_VEC;
460 #endif
461 #ifdef CONFIG_VSX
462 	if (cpu_has_feature(CPU_FTR_VSX))
463 		msr_all_available |= MSR_VSX;
464 #endif
465 #ifdef CONFIG_SPE
466 	if (cpu_has_feature(CPU_FTR_SPE))
467 		msr_all_available |= MSR_SPE;
468 #endif
469 
470 	return 0;
471 }
472 early_initcall(init_msr_all_available);
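
/*
 * For example (illustrative): on a POWER8-class CPU with CONFIG_PPC_FPU,
 * CONFIG_ALTIVEC and CONFIG_VSX enabled, msr_all_available ends up as
 * MSR_FP | MSR_VEC | MSR_VSX, so giveup_all()/save_all() below can test
 * and flush every live facility with a single mask.
 */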
473 
474 void giveup_all(struct task_struct *tsk)
475 {
476 	unsigned long usermsr;
477 
478 	if (!tsk->thread.regs)
479 		return;
480 
481 	usermsr = tsk->thread.regs->msr;
482 
483 	if ((usermsr & msr_all_available) == 0)
484 		return;
485 
486 	msr_check_and_set(msr_all_available);
487 	check_if_tm_restore_required(tsk);
488 
489 #ifdef CONFIG_PPC_FPU
490 	if (usermsr & MSR_FP)
491 		__giveup_fpu(tsk);
492 #endif
493 #ifdef CONFIG_ALTIVEC
494 	if (usermsr & MSR_VEC)
495 		__giveup_altivec(tsk);
496 #endif
497 #ifdef CONFIG_VSX
498 	if (usermsr & MSR_VSX)
499 		__giveup_vsx(tsk);
500 #endif
501 #ifdef CONFIG_SPE
502 	if (usermsr & MSR_SPE)
503 		__giveup_spe(tsk);
504 #endif
505 
506 	msr_check_and_clear(msr_all_available);
507 }
508 EXPORT_SYMBOL(giveup_all);
509 
510 void restore_math(struct pt_regs *regs)
511 {
512 	unsigned long msr;
513 
514 	if (!msr_tm_active(regs->msr) &&
515 		!current->thread.load_fp && !loadvec(current->thread))
516 		return;
517 
518 	msr = regs->msr;
519 	msr_check_and_set(msr_all_available);
520 
521 	/*
522 	 * Only reload if the bit is not set in the user MSR; the bit being
523 	 * set indicates that the registers are already hot.
524 	 */
525 	if ((!(msr & MSR_FP)) && restore_fp(current))
526 		msr |= MSR_FP | current->thread.fpexc_mode;
527 
528 	if ((!(msr & MSR_VEC)) && restore_altivec(current))
529 		msr |= MSR_VEC;
530 
531 	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
532 			restore_vsx(current)) {
533 		msr |= MSR_VSX;
534 	}
535 
536 	msr_check_and_clear(msr_all_available);
537 
538 	regs->msr = msr;
539 }
540 
541 void save_all(struct task_struct *tsk)
542 {
543 	unsigned long usermsr;
544 
545 	if (!tsk->thread.regs)
546 		return;
547 
548 	usermsr = tsk->thread.regs->msr;
549 
550 	if ((usermsr & msr_all_available) == 0)
551 		return;
552 
553 	msr_check_and_set(msr_all_available);
554 
555 	/*
556 	 * Given the way the register space is arranged in hardware, save_vsx()
557 	 * boils down to a save_fpu() plus a save_altivec().
558 	 */
559 	if (usermsr & MSR_VSX) {
560 		save_vsx(tsk);
561 	} else {
562 		if (usermsr & MSR_FP)
563 			save_fpu(tsk);
564 
565 		if (usermsr & MSR_VEC)
566 			save_altivec(tsk);
567 	}
568 
569 	if (usermsr & MSR_SPE)
570 		__giveup_spe(tsk);
571 
572 	msr_check_and_clear(msr_all_available);
573 }
574 
575 void flush_all_to_thread(struct task_struct *tsk)
576 {
577 	if (tsk->thread.regs) {
578 		preempt_disable();
579 		BUG_ON(tsk != current);
580 		save_all(tsk);
581 
582 #ifdef CONFIG_SPE
583 		if (tsk->thread.regs->msr & MSR_SPE)
584 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
585 #endif
586 
587 		preempt_enable();
588 	}
589 }
590 EXPORT_SYMBOL(flush_all_to_thread);
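
/*
 * Callers are expected to flush before reading a task's register image
 * from memory; arch_dup_task_struct() below, for instance, calls
 * flush_all_to_thread(src) so the child copies up-to-date FP/VMX/VSX/SPE
 * state out of the parent's thread_struct.
 */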
591 
592 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
593 void do_send_trap(struct pt_regs *regs, unsigned long address,
594 		  unsigned long error_code, int signal_code, int breakpt)
595 {
596 	siginfo_t info;
597 
598 	current->thread.trap_nr = signal_code;
599 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
600 			11, SIGSEGV) == NOTIFY_STOP)
601 		return;
602 
603 	/* Deliver the signal to userspace */
604 	info.si_signo = SIGTRAP;
605 	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
606 	info.si_code = signal_code;
607 	info.si_addr = (void __user *)address;
608 	force_sig_info(SIGTRAP, &info, current);
609 }
610 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
611 void do_break (struct pt_regs *regs, unsigned long address,
612 		    unsigned long error_code)
613 {
614 	siginfo_t info;
615 
616 	current->thread.trap_nr = TRAP_HWBKPT;
617 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
618 			11, SIGSEGV) == NOTIFY_STOP)
619 		return;
620 
621 	if (debugger_break_match(regs))
622 		return;
623 
624 	/* Clear the breakpoint */
625 	hw_breakpoint_disable();
626 
627 	/* Deliver the signal to userspace */
628 	info.si_signo = SIGTRAP;
629 	info.si_errno = 0;
630 	info.si_code = TRAP_HWBKPT;
631 	info.si_addr = (void __user *)address;
632 	force_sig_info(SIGTRAP, &info, current);
633 }
634 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
635 
636 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
637 
638 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
639 /*
640  * Set the debug registers back to their default "safe" values.
641  */
642 static void set_debug_reg_defaults(struct thread_struct *thread)
643 {
644 	thread->debug.iac1 = thread->debug.iac2 = 0;
645 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
646 	thread->debug.iac3 = thread->debug.iac4 = 0;
647 #endif
648 	thread->debug.dac1 = thread->debug.dac2 = 0;
649 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
650 	thread->debug.dvc1 = thread->debug.dvc2 = 0;
651 #endif
652 	thread->debug.dbcr0 = 0;
653 #ifdef CONFIG_BOOKE
654 	/*
655 	 * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1).
656 	 */
657 	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
658 			DBCR1_IAC3US | DBCR1_IAC4US;
659 	/*
660 	 * Force Data Address Compare User/Supervisor bits to be User-only
661 	 * (0b11, MSR[PR]=1) and clear all other bits in the DBCR2 register.
662 	 */
663 	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
664 #else
665 	thread->debug.dbcr1 = 0;
666 #endif
667 }
668 
669 static void prime_debug_regs(struct debug_reg *debug)
670 {
671 	/*
672 	 * We could have inherited MSR_DE from userspace, since
673 	 * it doesn't get cleared on exception entry.  Make sure
674 	 * MSR_DE is clear before we enable any debug events.
675 	 */
676 	mtmsr(mfmsr() & ~MSR_DE);
677 
678 	mtspr(SPRN_IAC1, debug->iac1);
679 	mtspr(SPRN_IAC2, debug->iac2);
680 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
681 	mtspr(SPRN_IAC3, debug->iac3);
682 	mtspr(SPRN_IAC4, debug->iac4);
683 #endif
684 	mtspr(SPRN_DAC1, debug->dac1);
685 	mtspr(SPRN_DAC2, debug->dac2);
686 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
687 	mtspr(SPRN_DVC1, debug->dvc1);
688 	mtspr(SPRN_DVC2, debug->dvc2);
689 #endif
690 	mtspr(SPRN_DBCR0, debug->dbcr0);
691 	mtspr(SPRN_DBCR1, debug->dbcr1);
692 #ifdef CONFIG_BOOKE
693 	mtspr(SPRN_DBCR2, debug->dbcr2);
694 #endif
695 }
696 /*
697  * If either the old or the new thread is making use of the
698  * debug registers, set the debug registers from the values
699  * stored in the new thread.
700  */
701 void switch_booke_debug_regs(struct debug_reg *new_debug)
702 {
703 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
704 		|| (new_debug->dbcr0 & DBCR0_IDM))
705 			prime_debug_regs(new_debug);
706 }
707 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
708 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
709 #ifndef CONFIG_HAVE_HW_BREAKPOINT
710 static void set_debug_reg_defaults(struct thread_struct *thread)
711 {
712 	thread->hw_brk.address = 0;
713 	thread->hw_brk.type = 0;
714 	set_breakpoint(&thread->hw_brk);
715 }
716 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
717 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
718 
719 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
720 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
721 {
722 	mtspr(SPRN_DAC1, dabr);
723 #ifdef CONFIG_PPC_47x
724 	isync();
725 #endif
726 	return 0;
727 }
728 #elif defined(CONFIG_PPC_BOOK3S)
729 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
730 {
731 	mtspr(SPRN_DABR, dabr);
732 	if (cpu_has_feature(CPU_FTR_DABRX))
733 		mtspr(SPRN_DABRX, dabrx);
734 	return 0;
735 }
736 #elif defined(CONFIG_PPC_8xx)
737 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
738 {
739 	unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
740 	unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
741 	unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
742 
743 	if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
744 		lctrl1 |= 0xa0000;
745 	else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
746 		lctrl1 |= 0xf0000;
747 	else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
748 		lctrl2 = 0;
749 
750 	mtspr(SPRN_LCTRL2, 0);
751 	mtspr(SPRN_CMPE, addr);
752 	mtspr(SPRN_CMPF, addr + 4);
753 	mtspr(SPRN_LCTRL1, lctrl1);
754 	mtspr(SPRN_LCTRL2, lctrl2);
755 
756 	return 0;
757 }
758 #else
759 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
760 {
761 	return -EINVAL;
762 }
763 #endif
764 
765 static inline int set_dabr(struct arch_hw_breakpoint *brk)
766 {
767 	unsigned long dabr, dabrx;
768 
769 	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
770 	dabrx = ((brk->type >> 3) & 0x7);
771 
772 	if (ppc_md.set_dabr)
773 		return ppc_md.set_dabr(dabr, dabrx);
774 
775 	return __set_dabr(dabr, dabrx);
776 }
777 
778 static inline int set_dawr(struct arch_hw_breakpoint *brk)
779 {
780 	unsigned long dawr, dawrx, mrd;
781 
782 	dawr = brk->address;
783 
784 	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
785 		                   << (63 - 58); /* read/write bits */
786 	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
787 		                   << (63 - 59); /* translate */
788 	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
789 		                   >> 3; /* PRIV bits */
790 	/* The dawr length is stored in field MRD, bits 48:53.  It encodes the
791 	   match range in doublewords (64 bits), biased by -1, e.g. 0b000000=1DW
792 	   and 0b111111=64DW.
793 	   brk->len is in bytes.
794 	   This aligns up to doubleword size, shifts and applies the bias.
795 	*/
796 	mrd = ((brk->len + 7) >> 3) - 1;
797 	dawrx |= (mrd & 0x3f) << (63 - 53);
798 
799 	if (ppc_md.set_dawr)
800 		return ppc_md.set_dawr(dawr, dawrx);
801 	mtspr(SPRN_DAWR, dawr);
802 	mtspr(SPRN_DAWRX, dawrx);
803 	return 0;
804 }
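
/*
 * Worked example (illustrative): for a 16-byte watchpoint, brk->len = 16,
 * so mrd = ((16 + 7) >> 3) - 1 = 1, i.e. a biased match range of two
 * doublewords, placed into the DAWRX MRD field by the << (63 - 53) shift.
 */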
805 
806 void __set_breakpoint(struct arch_hw_breakpoint *brk)
807 {
808 	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
809 
810 	if (cpu_has_feature(CPU_FTR_DAWR))
811 		set_dawr(brk);
812 	else
813 		set_dabr(brk);
814 }
815 
816 void set_breakpoint(struct arch_hw_breakpoint *brk)
817 {
818 	preempt_disable();
819 	__set_breakpoint(brk);
820 	preempt_enable();
821 }
822 
823 #ifdef CONFIG_PPC64
824 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
825 #endif
826 
827 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
828 			      struct arch_hw_breakpoint *b)
829 {
830 	if (a->address != b->address)
831 		return false;
832 	if (a->type != b->type)
833 		return false;
834 	if (a->len != b->len)
835 		return false;
836 	return true;
837 }
838 
839 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
840 
841 static inline bool tm_enabled(struct task_struct *tsk)
842 {
843 	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
844 }
845 
846 static void tm_reclaim_thread(struct thread_struct *thr,
847 			      struct thread_info *ti, uint8_t cause)
848 {
849 	/*
850 	 * Use the current MSR TM suspended bit to track if we have
851 	 * checkpointed state outstanding.
852 	 * On signal delivery, we'd normally reclaim the checkpointed
853 	 * state to obtain the stack pointer (see: get_tm_stackpointer()).
854 	 * This will then directly return to userspace without going
855 	 * through __switch_to(). However, if the stack frame is bad,
856 	 * we need to exit this thread which calls __switch_to() which
857 	 * will again attempt to reclaim the already saved tm state.
858 	 * Hence we need to check that we've not already reclaimed
859 	 * this state.
860 	 * We do this using the current MSR, rather than tracking it in
861 	 * some specific thread_struct bit, as it has the additional
862 	 * benefit of checking for a potential TM bad thing exception.
863 	 */
864 	if (!MSR_TM_SUSPENDED(mfmsr()))
865 		return;
866 
867 	giveup_all(container_of(thr, struct task_struct, thread));
868 
869 	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
870 }
871 
872 void tm_reclaim_current(uint8_t cause)
873 {
874 	tm_enable();
875 	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
876 }
877 
878 static inline void tm_reclaim_task(struct task_struct *tsk)
879 {
880 	/* We have to work out if we're switching from/to a task that's in the
881 	 * middle of a transaction.
882 	 *
883 	 * In switching we need to maintain a 2nd register state as
884 	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
885 	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
886 	 * ckvr_state
887 	 *
888 	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
889 	 */
890 	struct thread_struct *thr = &tsk->thread;
891 
892 	if (!thr->regs)
893 		return;
894 
895 	if (!MSR_TM_ACTIVE(thr->regs->msr))
896 		goto out_and_saveregs;
897 
898 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
899 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
900 		 tsk->pid, thr->regs->nip,
901 		 thr->regs->ccr, thr->regs->msr,
902 		 thr->regs->trap);
903 
904 	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
905 
906 	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
907 		 tsk->pid);
908 
909 out_and_saveregs:
910 	/* Always save the regs here, even if a transaction's not active.
911 	 * This context-switches a thread's TM info SPRs.  We do it here to
912 	 * be consistent with the restore path (in recheckpoint) which
913 	 * cannot happen later in _switch().
914 	 */
915 	tm_save_sprs(thr);
916 }
917 
918 extern void __tm_recheckpoint(struct thread_struct *thread,
919 			      unsigned long orig_msr);
920 
921 void tm_recheckpoint(struct thread_struct *thread,
922 		     unsigned long orig_msr)
923 {
924 	unsigned long flags;
925 
926 	if (!(thread->regs->msr & MSR_TM))
927 		return;
928 
929 	/* We really can't be interrupted here: the TM SPRs (e.g. TEXASR) must
930 	 * not change, and later in the trecheckpoint code we run with a
931 	 * userspace R1.  So let's hard disable over this region.
932 	 */
933 	local_irq_save(flags);
934 	hard_irq_disable();
935 
936 	/* The TM SPRs are restored here, so that TEXASR.FS can be set
937 	 * before the trecheckpoint and no explosion occurs.
938 	 */
939 	tm_restore_sprs(thread);
940 
941 	__tm_recheckpoint(thread, orig_msr);
942 
943 	local_irq_restore(flags);
944 }
945 
946 static inline void tm_recheckpoint_new_task(struct task_struct *new)
947 {
948 	unsigned long msr;
949 
950 	if (!cpu_has_feature(CPU_FTR_TM))
951 		return;
952 
953 	/* Recheckpoint the registers of the thread we're about to switch to.
954 	 *
955 	 * If the task was using FP, we non-lazily reload both the original and
956 	 * the speculative FP register states.  This is because the kernel
957 	 * doesn't see if/when a TM rollback occurs, so if we take an FP
958 	 * unavailable later, we are unable to determine which set of FP regs
959 	 * need to be restored.
960 	 */
961 	if (!tm_enabled(new))
962 		return;
963 
964 	if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
965 		tm_restore_sprs(&new->thread);
966 		return;
967 	}
968 	msr = new->thread.ckpt_regs.msr;
969 	/* Recheckpoint to restore original checkpointed register state. */
970 	TM_DEBUG("*** tm_recheckpoint of pid %d "
971 		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
972 		 new->pid, new->thread.regs->msr, msr);
973 
974 	tm_recheckpoint(&new->thread, msr);
975 
976 	/*
977 	 * The checkpointed state has been restored but the live state has
978 	 * not, ensure all the math functionality is turned off to trigger
979 	 * restore_math() to reload.
980 	 */
981 	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
982 
983 	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
984 		 "(kernel msr 0x%lx)\n",
985 		 new->pid, mfmsr());
986 }
987 
988 static inline void __switch_to_tm(struct task_struct *prev,
989 		struct task_struct *new)
990 {
991 	if (cpu_has_feature(CPU_FTR_TM)) {
992 		if (tm_enabled(prev) || tm_enabled(new))
993 			tm_enable();
994 
995 		if (tm_enabled(prev)) {
996 			prev->thread.load_tm++;
997 			tm_reclaim_task(prev);
998 			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
999 				prev->thread.regs->msr &= ~MSR_TM;
1000 		}
1001 
1002 		tm_recheckpoint_new_task(new);
1003 	}
1004 }
1005 
1006 /*
1007  * This is called if we are on the way out to userspace and the
1008  * TIF_RESTORE_TM flag is set.  It checks if we need to reload
1009  * FP and/or vector state and does so if necessary.
1010  * If userspace is inside a transaction (whether active or
1011  * suspended) and FP/VMX/VSX instructions have ever been enabled
1012  * inside that transaction, then we have to keep them enabled
1013  * and keep the FP/VMX/VSX state loaded while ever the transaction
1014  * continues.  The reason is that if we didn't, and subsequently
1015  * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1016  * we don't know whether it's the same transaction, and thus we
1017  * don't know which of the checkpointed state and the transactional
1018  * state to use.
1019  */
1020 void restore_tm_state(struct pt_regs *regs)
1021 {
1022 	unsigned long msr_diff;
1023 
1024 	/*
1025 	 * This is the only moment we should clear TIF_RESTORE_TM as
1026 	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1027 	 * again; anything else could lead to an incorrect ckpt_msr being
1028 	 * saved and therefore incorrect signal contexts.
1029 	 */
1030 	clear_thread_flag(TIF_RESTORE_TM);
1031 	if (!MSR_TM_ACTIVE(regs->msr))
1032 		return;
1033 
1034 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1035 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1036 
1037 	/* Ensure that restore_math() will restore */
1038 	if (msr_diff & MSR_FP)
1039 		current->thread.load_fp = 1;
1040 #ifdef CONFIG_ALTIVEC
1041 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1042 		current->thread.load_vec = 1;
1043 #endif
1044 	restore_math(regs);
1045 
1046 	regs->msr |= msr_diff;
1047 }
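
/*
 * Concrete example (illustrative): if ckpt_regs.msr has MSR_FP set but
 * the transactional regs->msr does not, msr_diff contains MSR_FP, so
 * load_fp is forced on, restore_math() reloads the FP state, and MSR_FP
 * is OR-ed back into regs->msr before returning to userspace.
 */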
1048 
1049 #else
1050 #define tm_recheckpoint_new_task(new)
1051 #define __switch_to_tm(prev, new)
1052 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1053 
1054 static inline void save_sprs(struct thread_struct *t)
1055 {
1056 #ifdef CONFIG_ALTIVEC
1057 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
1058 		t->vrsave = mfspr(SPRN_VRSAVE);
1059 #endif
1060 #ifdef CONFIG_PPC_BOOK3S_64
1061 	if (cpu_has_feature(CPU_FTR_DSCR))
1062 		t->dscr = mfspr(SPRN_DSCR);
1063 
1064 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1065 		t->bescr = mfspr(SPRN_BESCR);
1066 		t->ebbhr = mfspr(SPRN_EBBHR);
1067 		t->ebbrr = mfspr(SPRN_EBBRR);
1068 
1069 		t->fscr = mfspr(SPRN_FSCR);
1070 
1071 		/*
1072 		 * Note that the TAR is not available for use in the kernel.
1073 		 * (To provide this, the TAR should be backed up/restored on
1074 		 * exception entry/exit instead, and be in pt_regs.  FIXME,
1075 		 * this should be in pt_regs anyway (for debug).)
1076 		 */
1077 		t->tar = mfspr(SPRN_TAR);
1078 	}
1079 #endif
1080 }
1081 
1082 static inline void restore_sprs(struct thread_struct *old_thread,
1083 				struct thread_struct *new_thread)
1084 {
1085 #ifdef CONFIG_ALTIVEC
1086 	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1087 	    old_thread->vrsave != new_thread->vrsave)
1088 		mtspr(SPRN_VRSAVE, new_thread->vrsave);
1089 #endif
1090 #ifdef CONFIG_PPC_BOOK3S_64
1091 	if (cpu_has_feature(CPU_FTR_DSCR)) {
1092 		u64 dscr = get_paca()->dscr_default;
1093 		if (new_thread->dscr_inherit)
1094 			dscr = new_thread->dscr;
1095 
1096 		if (old_thread->dscr != dscr)
1097 			mtspr(SPRN_DSCR, dscr);
1098 	}
1099 
1100 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1101 		if (old_thread->bescr != new_thread->bescr)
1102 			mtspr(SPRN_BESCR, new_thread->bescr);
1103 		if (old_thread->ebbhr != new_thread->ebbhr)
1104 			mtspr(SPRN_EBBHR, new_thread->ebbhr);
1105 		if (old_thread->ebbrr != new_thread->ebbrr)
1106 			mtspr(SPRN_EBBRR, new_thread->ebbrr);
1107 
1108 		if (old_thread->fscr != new_thread->fscr)
1109 			mtspr(SPRN_FSCR, new_thread->fscr);
1110 
1111 		if (old_thread->tar != new_thread->tar)
1112 			mtspr(SPRN_TAR, new_thread->tar);
1113 	}
1114 #endif
1115 }
1116 
1117 struct task_struct *__switch_to(struct task_struct *prev,
1118 	struct task_struct *new)
1119 {
1120 	struct thread_struct *new_thread, *old_thread;
1121 	struct task_struct *last;
1122 #ifdef CONFIG_PPC_BOOK3S_64
1123 	struct ppc64_tlb_batch *batch;
1124 #endif
1125 
1126 	new_thread = &new->thread;
1127 	old_thread = &current->thread;
1128 
1129 	WARN_ON(!irqs_disabled());
1130 
1131 #ifdef CONFIG_PPC64
1132 	/*
1133 	 * Collect processor utilization data per process
1134 	 */
1135 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1136 		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
1137 		long unsigned start_tb, current_tb;
1138 		start_tb = old_thread->start_tb;
1139 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
1140 		old_thread->accum_tb += (current_tb - start_tb);
1141 		new_thread->start_tb = current_tb;
1142 	}
1143 #endif /* CONFIG_PPC64 */
1144 
1145 #ifdef CONFIG_PPC_STD_MMU_64
1146 	batch = this_cpu_ptr(&ppc64_tlb_batch);
1147 	if (batch->active) {
1148 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1149 		if (batch->index)
1150 			__flush_tlb_pending(batch);
1151 		batch->active = 0;
1152 	}
1153 #endif /* CONFIG_PPC_STD_MMU_64 */
1154 
1155 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1156 	switch_booke_debug_regs(&new->thread.debug);
1157 #else
1158 /*
1159  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1160  * schedule the DABR.
1161  */
1162 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1163 	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1164 		__set_breakpoint(&new->thread.hw_brk);
1165 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1166 #endif
1167 
1168 	/*
1169 	 * We need to save SPRs before treclaim/trecheckpoint as these will
1170 	 * change a number of them.
1171 	 */
1172 	save_sprs(&prev->thread);
1173 
1174 	/* Save FPU, Altivec, VSX and SPE state */
1175 	giveup_all(prev);
1176 
1177 	__switch_to_tm(prev, new);
1178 
1179 	/*
1180 	 * We can't take a PMU exception inside _switch() since there is a
1181 	 * window where the kernel stack SLB and the kernel stack are out
1182 	 * of sync. Hard disable here.
1183 	 */
1184 	hard_irq_disable();
1185 
1186 	/*
1187 	 * Call restore_sprs() before calling _switch(). If we move it after
1188 	 * _switch() then we miss out on calling it for new tasks. The reason
1189 	 * for this is we manually create a stack frame for new tasks that
1190 	 * directly returns through ret_from_fork() or
1191 	 * ret_from_kernel_thread(). See copy_thread() for details.
1192 	 */
1193 	restore_sprs(old_thread, new_thread);
1194 
1195 	last = _switch(old_thread, new_thread);
1196 
1197 #ifdef CONFIG_PPC_STD_MMU_64
1198 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1199 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1200 		batch = this_cpu_ptr(&ppc64_tlb_batch);
1201 		batch->active = 1;
1202 	}
1203 
1204 	if (current_thread_info()->task->thread.regs)
1205 		restore_math(current_thread_info()->task->thread.regs);
1206 #endif /* CONFIG_PPC_STD_MMU_64 */
1207 
1208 	return last;
1209 }
1210 
1211 static int instructions_to_print = 16;
1212 
1213 static void show_instructions(struct pt_regs *regs)
1214 {
1215 	int i;
1216 	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
1217 			sizeof(int));
1218 
1219 	printk("Instruction dump:");
1220 
1221 	for (i = 0; i < instructions_to_print; i++) {
1222 		int instr;
1223 
1224 		if (!(i % 8))
1225 			pr_cont("\n");
1226 
1227 #if !defined(CONFIG_BOOKE)
1228 		/* If executing with the IMMU off, adjust pc rather
1229 		 * than print XXXXXXXX.
1230 		 */
1231 		if (!(regs->msr & MSR_IR))
1232 			pc = (unsigned long)phys_to_virt(pc);
1233 #endif
1234 
1235 		if (!__kernel_text_address(pc) ||
1236 		     probe_kernel_address((unsigned int __user *)pc, instr)) {
1237 			pr_cont("XXXXXXXX ");
1238 		} else {
1239 			if (regs->nip == pc)
1240 				pr_cont("<%08x> ", instr);
1241 			else
1242 				pr_cont("%08x ", instr);
1243 		}
1244 
1245 		pc += sizeof(int);
1246 	}
1247 
1248 	pr_cont("\n");
1249 }
1250 
1251 struct regbit {
1252 	unsigned long bit;
1253 	const char *name;
1254 };
1255 
1256 static struct regbit msr_bits[] = {
1257 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1258 	{MSR_SF,	"SF"},
1259 	{MSR_HV,	"HV"},
1260 #endif
1261 	{MSR_VEC,	"VEC"},
1262 	{MSR_VSX,	"VSX"},
1263 #ifdef CONFIG_BOOKE
1264 	{MSR_CE,	"CE"},
1265 #endif
1266 	{MSR_EE,	"EE"},
1267 	{MSR_PR,	"PR"},
1268 	{MSR_FP,	"FP"},
1269 	{MSR_ME,	"ME"},
1270 #ifdef CONFIG_BOOKE
1271 	{MSR_DE,	"DE"},
1272 #else
1273 	{MSR_SE,	"SE"},
1274 	{MSR_BE,	"BE"},
1275 #endif
1276 	{MSR_IR,	"IR"},
1277 	{MSR_DR,	"DR"},
1278 	{MSR_PMM,	"PMM"},
1279 #ifndef CONFIG_BOOKE
1280 	{MSR_RI,	"RI"},
1281 	{MSR_LE,	"LE"},
1282 #endif
1283 	{0,		NULL}
1284 };
1285 
1286 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1287 {
1288 	const char *s = "";
1289 
1290 	for (; bits->bit; ++bits)
1291 		if (val & bits->bit) {
1292 			pr_cont("%s%s", s, bits->name);
1293 			s = sep;
1294 		}
1295 }
1296 
1297 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1298 static struct regbit msr_tm_bits[] = {
1299 	{MSR_TS_T,	"T"},
1300 	{MSR_TS_S,	"S"},
1301 	{MSR_TM,	"E"},
1302 	{0,		NULL}
1303 };
1304 
1305 static void print_tm_bits(unsigned long val)
1306 {
1307 /*
1308  * This only prints something if at least one of the TM bits is set.
1309  * Inside the TM[], the output means:
1310  *   E: Enabled		(bit 32)
1311  *   S: Suspended	(bit 33)
1312  *   T: Transactional	(bit 34)
1313  */
1314 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1315 		pr_cont(",TM[");
1316 		print_bits(val, msr_tm_bits, "");
1317 		pr_cont("]");
1318 	}
1319 }
1320 #else
1321 static void print_tm_bits(unsigned long val) {}
1322 #endif
1323 
1324 static void print_msr_bits(unsigned long val)
1325 {
1326 	pr_cont("<");
1327 	print_bits(val, msr_bits, ",");
1328 	print_tm_bits(val);
1329 	pr_cont(">");
1330 }
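
/*
 * Example output (illustrative): a typical 64-bit user MSR prints as
 * something like <SF,EE,PR,FP,ME,IR,DR,RI,LE>, with a ",TM[E]"-style
 * suffix appended when transactional-memory bits are set.
 */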
1331 
1332 #ifdef CONFIG_PPC64
1333 #define REG		"%016lx"
1334 #define REGS_PER_LINE	4
1335 #define LAST_VOLATILE	13
1336 #else
1337 #define REG		"%08lx"
1338 #define REGS_PER_LINE	8
1339 #define LAST_VOLATILE	12
1340 #endif
1341 
1342 void show_regs(struct pt_regs * regs)
1343 {
1344 	int i, trap;
1345 
1346 	show_regs_print_info(KERN_DEFAULT);
1347 
1348 	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1349 	       regs->nip, regs->link, regs->ctr);
1350 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
1351 	       regs, regs->trap, print_tainted(), init_utsname()->release);
1352 	printk("MSR: "REG" ", regs->msr);
1353 	print_msr_bits(regs->msr);
1354 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
1355 	trap = TRAP(regs);
1356 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1357 		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1358 	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1359 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1360 		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1361 #else
1362 		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1363 #endif
1364 #ifdef CONFIG_PPC64
1365 	pr_cont("SOFTE: %ld ", regs->softe);
1366 #endif
1367 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1368 	if (MSR_TM_ACTIVE(regs->msr))
1369 		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1370 #endif
1371 
1372 	for (i = 0;  i < 32;  i++) {
1373 		if ((i % REGS_PER_LINE) == 0)
1374 			pr_cont("\nGPR%02d: ", i);
1375 		pr_cont(REG " ", regs->gpr[i]);
1376 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
1377 			break;
1378 	}
1379 	pr_cont("\n");
1380 #ifdef CONFIG_KALLSYMS
1381 	/*
1382 	 * Look up the NIP late so we have the best chance of getting the
1383 	 * above info out without failing.
1384 	 */
1385 	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1386 	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1387 #endif
1388 	show_stack(current, (unsigned long *) regs->gpr[1]);
1389 	if (!user_mode(regs))
1390 		show_instructions(regs);
1391 }
1392 
1393 void flush_thread(void)
1394 {
1395 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1396 	flush_ptrace_hw_breakpoint(current);
1397 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1398 	set_debug_reg_defaults(&current->thread);
1399 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1400 }
1401 
1402 void
1403 release_thread(struct task_struct *t)
1404 {
1405 }
1406 
1407 /*
1408  * This gets called so that we can store coprocessor state into memory and
1409  * copy the current task into the new thread.
1410  */
1411 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1412 {
1413 	flush_all_to_thread(src);
1414 	/*
1415 	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
1416 	 * flush but it removes the checkpointed state from the current CPU and
1417 	 * transitions the CPU out of TM mode.  Hence we need to call
1418 	 * tm_recheckpoint_new_task() (on the same task) to restore the
1419 	 * checkpointed state back and the TM mode.
1420 	 *
1421 	 * We can't pass dst because it isn't ready. That doesn't matter, as
1422 	 * passing dst is only important for __switch_to().
1423 	 */
1424 	__switch_to_tm(src, src);
1425 
1426 	*dst = *src;
1427 
1428 	clear_task_ebb(dst);
1429 
1430 	return 0;
1431 }
1432 
1433 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1434 {
1435 #ifdef CONFIG_PPC_STD_MMU_64
1436 	unsigned long sp_vsid;
1437 	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1438 
1439 	if (radix_enabled())
1440 		return;
1441 
1442 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1443 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1444 			<< SLB_VSID_SHIFT_1T;
1445 	else
1446 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1447 			<< SLB_VSID_SHIFT;
1448 	sp_vsid |= SLB_VSID_KERNEL | llp;
1449 	p->thread.ksp_vsid = sp_vsid;
1450 #endif
1451 }
1452 
1453 /*
1454  * Copy a thread.
1455  */
1456 
1457 /*
1458  * Copy architecture-specific thread state
1459  */
1460 int copy_thread(unsigned long clone_flags, unsigned long usp,
1461 		unsigned long kthread_arg, struct task_struct *p)
1462 {
1463 	struct pt_regs *childregs, *kregs;
1464 	extern void ret_from_fork(void);
1465 	extern void ret_from_kernel_thread(void);
1466 	void (*f)(void);
1467 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1468 	struct thread_info *ti = task_thread_info(p);
1469 
1470 	klp_init_thread_info(ti);
1471 
1472 	/* Copy registers */
1473 	sp -= sizeof(struct pt_regs);
1474 	childregs = (struct pt_regs *) sp;
1475 	if (unlikely(p->flags & PF_KTHREAD)) {
1476 		/* kernel thread */
1477 		memset(childregs, 0, sizeof(struct pt_regs));
1478 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
1479 		/* function */
1480 		if (usp)
1481 			childregs->gpr[14] = ppc_function_entry((void *)usp);
1482 #ifdef CONFIG_PPC64
1483 		clear_tsk_thread_flag(p, TIF_32BIT);
1484 		childregs->softe = 1;
1485 #endif
1486 		childregs->gpr[15] = kthread_arg;
1487 		p->thread.regs = NULL;	/* no user register state */
1488 		ti->flags |= _TIF_RESTOREALL;
1489 		f = ret_from_kernel_thread;
1490 	} else {
1491 		/* user thread */
1492 		struct pt_regs *regs = current_pt_regs();
1493 		CHECK_FULL_REGS(regs);
1494 		*childregs = *regs;
1495 		if (usp)
1496 			childregs->gpr[1] = usp;
1497 		p->thread.regs = childregs;
1498 		childregs->gpr[3] = 0;  /* Result from fork() */
1499 		if (clone_flags & CLONE_SETTLS) {
1500 #ifdef CONFIG_PPC64
1501 			if (!is_32bit_task())
1502 				childregs->gpr[13] = childregs->gpr[6];
1503 			else
1504 #endif
1505 				childregs->gpr[2] = childregs->gpr[6];
1506 		}
1507 
1508 		f = ret_from_fork;
1509 	}
1510 	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1511 	sp -= STACK_FRAME_OVERHEAD;
1512 
1513 	/*
1514 	 * The way this works is that at some point in the future
1515 	 * some task will call _switch to switch to the new task.
1516 	 * That will pop off the stack frame created below and start
1517 	 * the new task running at ret_from_fork.  The new task will
1518 	 * do some housekeeping and then return from the fork or clone
1519 	 * system call, using the stack frame created above.
1520 	 */
1521 	((unsigned long *)sp)[0] = 0;
1522 	sp -= sizeof(struct pt_regs);
1523 	kregs = (struct pt_regs *) sp;
1524 	sp -= STACK_FRAME_OVERHEAD;
1525 	p->thread.ksp = sp;
1526 #ifdef CONFIG_PPC32
1527 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1528 				_ALIGN_UP(sizeof(struct thread_info), 16);
1529 #endif
1530 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1531 	p->thread.ptrace_bps[0] = NULL;
1532 #endif
1533 
1534 	p->thread.fp_save_area = NULL;
1535 #ifdef CONFIG_ALTIVEC
1536 	p->thread.vr_save_area = NULL;
1537 #endif
1538 
1539 	setup_ksp_vsid(p, sp);
1540 
1541 #ifdef CONFIG_PPC64
1542 	if (cpu_has_feature(CPU_FTR_DSCR)) {
1543 		p->thread.dscr_inherit = current->thread.dscr_inherit;
1544 		p->thread.dscr = mfspr(SPRN_DSCR);
1545 	}
1546 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
1547 		p->thread.ppr = INIT_PPR;
1548 #endif
1549 	kregs->nip = ppc_function_entry(f);
1550 	return 0;
1551 }
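
/*
 * Sketch of the child's kernel stack as laid out above (top of stack
 * first; exact offsets depend on STACK_FRAME_OVERHEAD):
 *
 *	+-------------------------+ <- task_stack_page(p) + THREAD_SIZE
 *	| struct pt_regs          |    childregs (the child's user regs)
 *	+-------------------------+
 *	| frame with back chain 0 |    terminates stack walks
 *	+-------------------------+
 *	| struct pt_regs          |    kregs (kregs->nip = f)
 *	+-------------------------+
 *	| STACK_FRAME_OVERHEAD    | <- p->thread.ksp
 *	+-------------------------+
 */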
1552 
1553 /*
1554  * Set up a thread for executing a new program
1555  */
1556 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1557 {
1558 #ifdef CONFIG_PPC64
1559 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
1560 #endif
1561 
1562 	/*
1563 	 * If we exec out of a kernel thread then thread.regs will not be
1564 	 * set.  Do it now.
1565 	 */
1566 	if (!current->thread.regs) {
1567 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1568 		current->thread.regs = regs - 1;
1569 	}
1570 
1571 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1572 	/*
1573 	 * Clear any transactional state, we're exec()ing. The cause is
1574 	 * not important as there will never be a recheckpoint so it's not
1575 	 * user visible.
1576 	 */
1577 	if (MSR_TM_SUSPENDED(mfmsr()))
1578 		tm_reclaim_current(0);
1579 #endif
1580 
1581 	memset(regs->gpr, 0, sizeof(regs->gpr));
1582 	regs->ctr = 0;
1583 	regs->link = 0;
1584 	regs->xer = 0;
1585 	regs->ccr = 0;
1586 	regs->gpr[1] = sp;
1587 
1588 	/*
1589 	 * We have just cleared all the nonvolatile GPRs, so make
1590 	 * FULL_REGS(regs) return true.  This is necessary to allow
1591 	 * ptrace to examine the thread immediately after exec.
1592 	 */
1593 	regs->trap &= ~1UL;
1594 
1595 #ifdef CONFIG_PPC32
1596 	regs->mq = 0;
1597 	regs->nip = start;
1598 	regs->msr = MSR_USER;
1599 #else
1600 	if (!is_32bit_task()) {
1601 		unsigned long entry;
1602 
1603 		if (is_elf2_task()) {
1604 			/* Look ma, no function descriptors! */
1605 			entry = start;
1606 
1607 			/*
1608 			 * Ulrich says:
1609 			 *   The latest iteration of the ABI requires that when
1610 			 *   calling a function (at its global entry point),
1611 			 *   the caller must ensure r12 holds the entry point
1612 			 *   address (so that the function can quickly
1613 			 *   establish addressability).
1614 			 */
1615 			regs->gpr[12] = start;
1616 			/* Make sure that's restored on entry to userspace. */
1617 			set_thread_flag(TIF_RESTOREALL);
1618 		} else {
1619 			unsigned long toc;
1620 
1621 			/* start is a relocated pointer to the function
1622 			 * descriptor for the elf _start routine.  The first
1623 			 * entry in the function descriptor is the entry
1624 			 * address of _start and the second entry is the TOC
1625 			 * value we need to use.
1626 			 */
1627 			__get_user(entry, (unsigned long __user *)start);
1628 			__get_user(toc, (unsigned long __user *)start+1);
1629 
1630 			/* Check whether the e_entry function descriptor entries
1631 			 * need to be relocated before we can use them.
1632 			 */
1633 			if (load_addr != 0) {
1634 				entry += load_addr;
1635 				toc   += load_addr;
1636 			}
1637 			regs->gpr[2] = toc;
1638 		}
1639 		regs->nip = entry;
1640 		regs->msr = MSR_USER64;
1641 	} else {
1642 		regs->nip = start;
1643 		regs->gpr[2] = 0;
1644 		regs->msr = MSR_USER32;
1645 	}
1646 #endif
1647 #ifdef CONFIG_VSX
1648 	current->thread.used_vsr = 0;
1649 #endif
1650 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1651 	current->thread.fp_save_area = NULL;
1652 #ifdef CONFIG_ALTIVEC
1653 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1654 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1655 	current->thread.vr_save_area = NULL;
1656 	current->thread.vrsave = 0;
1657 	current->thread.used_vr = 0;
1658 #endif /* CONFIG_ALTIVEC */
1659 #ifdef CONFIG_SPE
1660 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
1661 	current->thread.acc = 0;
1662 	current->thread.spefscr = 0;
1663 	current->thread.used_spe = 0;
1664 #endif /* CONFIG_SPE */
1665 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1666 	current->thread.tm_tfhar = 0;
1667 	current->thread.tm_texasr = 0;
1668 	current->thread.tm_tfiar = 0;
1669 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1670 }
1671 EXPORT_SYMBOL(start_thread);
1672 
1673 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1674 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
1675 
1676 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1677 {
1678 	struct pt_regs *regs = tsk->thread.regs;
1679 
1680 	/* This is a bit hairy.  If we are an SPE-enabled processor
1681 	 * (have embedded fp) we store the IEEE exception enable flags in
1682 	 * fpexc_mode.  fpexc_mode is also used for setting the FP exception
1683 	 * mode (async, precise, disabled) for 'Classic' FP. */
1684 	if (val & PR_FP_EXC_SW_ENABLE) {
1685 #ifdef CONFIG_SPE
1686 		if (cpu_has_feature(CPU_FTR_SPE)) {
1687 			/*
1688 			 * When the sticky exception bits are set
1689 			 * directly by userspace, it must call prctl
1690 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1691 			 * in the existing prctl settings) or
1692 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1693 			 * the bits being set).  <fenv.h> functions
1694 			 * saving and restoring the whole
1695 			 * floating-point environment need to do so
1696 			 * anyway to restore the prctl settings from
1697 			 * the saved environment.
1698 			 */
1699 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1700 			tsk->thread.fpexc_mode = val &
1701 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1702 			return 0;
1703 		} else {
1704 			return -EINVAL;
1705 		}
1706 #else
1707 		return -EINVAL;
1708 #endif
1709 	}
1710 
1711 	/* On a CONFIG_SPE implementation this does not hurt us.  The bits
1712 	 * that __pack_fe01 uses do not overlap with the bits used for
1713 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
1714 	 * on CONFIG_SPE implementations are reserved, so writing to
1715 	 * them does not change anything. */
1716 	if (val > PR_FP_EXC_PRECISE)
1717 		return -EINVAL;
1718 	tsk->thread.fpexc_mode = __pack_fe01(val);
1719 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
1720 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1721 			| tsk->thread.fpexc_mode;
1722 	return 0;
1723 }
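
/*
 * For example (illustrative): prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE)
 * reaches here with val == PR_FP_EXC_PRECISE; __pack_fe01() turns that
 * into both MSR_FE0 and MSR_FE1 set (precise FP exception mode), which
 * is then folded into regs->msr if the task currently has FP enabled.
 */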
1724 
1725 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1726 {
1727 	unsigned int val;
1728 
1729 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1730 #ifdef CONFIG_SPE
1731 		if (cpu_has_feature(CPU_FTR_SPE)) {
1732 			/*
1733 			 * When the sticky exception bits are set
1734 			 * directly by userspace, it must call prctl
1735 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1736 			 * in the existing prctl settings) or
1737 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1738 			 * the bits being set).  <fenv.h> functions
1739 			 * saving and restoring the whole
1740 			 * floating-point environment need to do so
1741 			 * anyway to restore the prctl settings from
1742 			 * the saved environment.
1743 			 */
1744 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1745 			val = tsk->thread.fpexc_mode;
1746 		} else
1747 			return -EINVAL;
1748 #else
1749 		return -EINVAL;
1750 #endif
1751 	else
1752 		val = __unpack_fe01(tsk->thread.fpexc_mode);
1753 	return put_user(val, (unsigned int __user *) adr);
1754 }
1755 
1756 int set_endian(struct task_struct *tsk, unsigned int val)
1757 {
1758 	struct pt_regs *regs = tsk->thread.regs;
1759 
1760 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1761 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1762 		return -EINVAL;
1763 
1764 	if (regs == NULL)
1765 		return -EINVAL;
1766 
1767 	if (val == PR_ENDIAN_BIG)
1768 		regs->msr &= ~MSR_LE;
1769 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1770 		regs->msr |= MSR_LE;
1771 	else
1772 		return -EINVAL;
1773 
1774 	return 0;
1775 }
1776 
1777 int get_endian(struct task_struct *tsk, unsigned long adr)
1778 {
1779 	struct pt_regs *regs = tsk->thread.regs;
1780 	unsigned int val;
1781 
1782 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1783 	    !cpu_has_feature(CPU_FTR_REAL_LE))
1784 		return -EINVAL;
1785 
1786 	if (regs == NULL)
1787 		return -EINVAL;
1788 
1789 	if (regs->msr & MSR_LE) {
1790 		if (cpu_has_feature(CPU_FTR_REAL_LE))
1791 			val = PR_ENDIAN_LITTLE;
1792 		else
1793 			val = PR_ENDIAN_PPC_LITTLE;
1794 	} else
1795 		val = PR_ENDIAN_BIG;
1796 
1797 	return put_user(val, (unsigned int __user *)adr);
1798 }
1799 
1800 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1801 {
1802 	tsk->thread.align_ctl = val;
1803 	return 0;
1804 }
1805 
1806 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1807 {
1808 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1809 }
1810 
1811 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1812 				  unsigned long nbytes)
1813 {
1814 	unsigned long stack_page;
1815 	unsigned long cpu = task_cpu(p);
1816 
1817 	/*
1818 	 * Avoid crashing if the stack has overflowed and corrupted
1819 	 * task_cpu(p), which is in the thread_info struct.
1820 	 */
1821 	if (cpu < NR_CPUS && cpu_possible(cpu)) {
1822 		stack_page = (unsigned long) hardirq_ctx[cpu];
1823 		if (sp >= stack_page + sizeof(struct thread_struct)
1824 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1825 			return 1;
1826 
1827 		stack_page = (unsigned long) softirq_ctx[cpu];
1828 		if (sp >= stack_page + sizeof(struct thread_struct)
1829 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1830 			return 1;
1831 	}
1832 	return 0;
1833 }
1834 
1835 int validate_sp(unsigned long sp, struct task_struct *p,
1836 		       unsigned long nbytes)
1837 {
1838 	unsigned long stack_page = (unsigned long)task_stack_page(p);
1839 
1840 	if (sp >= stack_page + sizeof(struct thread_struct)
1841 	    && sp <= stack_page + THREAD_SIZE - nbytes)
1842 		return 1;
1843 
1844 	return valid_irq_stack(sp, p, nbytes);
1845 }
1846 
1847 EXPORT_SYMBOL(validate_sp);
1848 
1849 unsigned long get_wchan(struct task_struct *p)
1850 {
1851 	unsigned long ip, sp;
1852 	int count = 0;
1853 
1854 	if (!p || p == current || p->state == TASK_RUNNING)
1855 		return 0;
1856 
1857 	sp = p->thread.ksp;
1858 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1859 		return 0;
1860 
1861 	do {
1862 		sp = *(unsigned long *)sp;
1863 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1864 			return 0;
1865 		if (count > 0) {
1866 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1867 			if (!in_sched_functions(ip))
1868 				return ip;
1869 		}
1870 	} while (count++ < 16);
1871 	return 0;
1872 }
1873 
1874 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1875 
1876 void show_stack(struct task_struct *tsk, unsigned long *stack)
1877 {
1878 	unsigned long sp, ip, lr, newsp;
1879 	int count = 0;
1880 	int firstframe = 1;
1881 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1882 	int curr_frame = current->curr_ret_stack;
1883 	extern void return_to_handler(void);
1884 	unsigned long rth = (unsigned long)return_to_handler;
1885 #endif
1886 
1887 	sp = (unsigned long) stack;
1888 	if (tsk == NULL)
1889 		tsk = current;
1890 	if (sp == 0) {
1891 		if (tsk == current)
1892 			sp = current_stack_pointer();
1893 		else
1894 			sp = tsk->thread.ksp;
1895 	}
1896 
1897 	lr = 0;
1898 	printk("Call Trace:\n");
1899 	do {
1900 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1901 			return;
1902 
1903 		stack = (unsigned long *) sp;
1904 		newsp = stack[0];
1905 		ip = stack[STACK_FRAME_LR_SAVE];
1906 		if (!firstframe || ip != lr) {
1907 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1908 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1909 			if ((ip == rth) && curr_frame >= 0) {
1910 				pr_cont(" (%pS)",
1911 				       (void *)current->ret_stack[curr_frame].ret);
1912 				curr_frame--;
1913 			}
1914 #endif
1915 			if (firstframe)
1916 				pr_cont(" (unreliable)");
1917 			pr_cont("\n");
1918 		}
1919 		firstframe = 0;
1920 
1921 		/*
1922 		 * See if this is an exception frame.
1923 		 * We look for the "regshere" marker in the current frame.
1924 		 */
1925 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1926 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1927 			struct pt_regs *regs = (struct pt_regs *)
1928 				(sp + STACK_FRAME_OVERHEAD);
1929 			lr = regs->link;
1930 			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
1931 			       regs->trap, (void *)regs->nip, (void *)lr);
1932 			firstframe = 1;
1933 		}
1934 
1935 		sp = newsp;
1936 	} while (count++ < kstack_depth_to_print);
1937 }
1938 
1939 #ifdef CONFIG_PPC64
1940 /* Called with hard IRQs off */
1941 void notrace __ppc64_runlatch_on(void)
1942 {
1943 	struct thread_info *ti = current_thread_info();
1944 	unsigned long ctrl;
1945 
1946 	ctrl = mfspr(SPRN_CTRLF);
1947 	ctrl |= CTRL_RUNLATCH;
1948 	mtspr(SPRN_CTRLT, ctrl);
1949 
1950 	ti->local_flags |= _TLF_RUNLATCH;
1951 }
1952 
1953 /* Called with hard IRQs off */
1954 void notrace __ppc64_runlatch_off(void)
1955 {
1956 	struct thread_info *ti = current_thread_info();
1957 	unsigned long ctrl;
1958 
1959 	ti->local_flags &= ~_TLF_RUNLATCH;
1960 
1961 	ctrl = mfspr(SPRN_CTRLF);
1962 	ctrl &= ~CTRL_RUNLATCH;
1963 	mtspr(SPRN_CTRLT, ctrl);
1964 }
1965 #endif /* CONFIG_PPC64 */
1966 
1967 unsigned long arch_align_stack(unsigned long sp)
1968 {
1969 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1970 		sp -= get_random_int() & ~PAGE_MASK;
1971 	return sp & ~0xf;
1972 }
1973 
1974 static inline unsigned long brk_rnd(void)
1975 {
1976 	unsigned long rnd = 0;
1977 
1978 	/* 8MB for 32bit, 1GB for 64bit */
1979 	if (is_32bit_task())
1980 		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
1981 	else
1982 		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
1983 
1984 	return rnd << PAGE_SHIFT;
1985 }
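
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): for a 32-bit
 * task the random value is taken modulo 1UL << (23 - 12) == 2048 pages,
 * so after the << PAGE_SHIFT the brk offset lies in [0, 8MB); the 64-bit
 * branch yields [0, 1GB) the same way.
 */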
1986 
1987 unsigned long arch_randomize_brk(struct mm_struct *mm)
1988 {
1989 	unsigned long base = mm->brk;
1990 	unsigned long ret;
1991 
1992 #ifdef CONFIG_PPC_STD_MMU_64
1993 	/*
1994 	 * If we are using 1TB segments and we are allowed to randomise
1995 	 * the heap, we can put it above 1TB so it is backed by a 1TB
1996 	 * segment. Otherwise the heap will be in the bottom 1TB
1997 	 * which always uses 256MB segments and this may result in a
1998 	 * performance penalty. We don't need to worry about radix. For
1999 	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2000 	 */
2001 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2002 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2003 #endif
2004 
2005 	ret = PAGE_ALIGN(base + brk_rnd());
2006 
2007 	if (ret < mm->brk)
2008 		return mm->brk;
2009 
2010 	return ret;
2011 }
2012 
2013