// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force the user to always see softe as 1 (interrupts enabled) */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (i == PT_SOFTE) {
			if (__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

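/*
 * Restore the general registers from the frame, skipping MSR and SOFTE:
 * MSR is handled separately by the callers, and the soft interrupt state
 * must not be overwritten from userspace (it is forced to 1 on save above).
 */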
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

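/*
 * The trampoline written into mc_pad is two instructions,
 * "li r0,<sigreturn syscall #>; sc" (see the sigret handling in
 * save_user_regs() below).  It is only used when the vDSO sigreturn
 * trampoline is unavailable.
 */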
/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

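/*
 * For illustration only (hypothetical userspace code, not part of this
 * file): a handler installed with SA_SIGINFO receives pointers into this
 * frame, as set up in handle_rt_signal32() below (r3 = signo,
 * r4 = &rt_sf->info, r5 = &rt_sf->uc):
 *
 *	void handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		ucontext_t *uc = ctx;	-- the rt_sigframe's uc
 *		...
 *	}
 */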
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the top 32 bits of the MSR in the tm frame
	 * so that we can check it on restore to see if TM is active.
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
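		/*
		 * The trampoline will be executed from the user stack;
		 * make the icache coherent with the instructions just
		 * stored through the dcache.
		 */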
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret,
			     unsigned long msr)
{
	WARN_ON(tm_suspend_disabled);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * Restore general registers, but not MSR or SOFTE. Also take care
	 * of keeping r2 (TLS) intact if this is not a signal return.
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	set_trap_norestart(regs);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * Restore general registers, but not MSR or SOFTE. Also take care
	 * of keeping r2 (TLS) intact if this is not a signal return.
	 * See the comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disable preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] has been updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame: put a Real Time Context onto the stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
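	/*
	 * Use the vDSO sigreturn trampoline if it is mapped; otherwise
	 * fall back to the trampoline written into the frame itself, and
	 * pass the sigreturn syscall number for it to load.
	 */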
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
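	/* Write the back chain so the handler's frame links to the old SP */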
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bits but
	 * doesn't provide any VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand.
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
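
/*
 * For illustration only (hypothetical userspace): a caller saves its
 * current context into old and resumes new in a single syscall, e.g.
 *
 *	swapcontext(&old, &new, sizeof(ucontext_t));
 *
 * Passing a ctx_size smaller than the kernel's minimum fails with
 * -EINVAL, as checked above.
 */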

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended, as we can never be
	 * active in the kernel; if we somehow are, there is nothing better
	 * to do than go ahead and hit the Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi << 32)) {
			/* Trying to start TM on a non-TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're in a
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset the MSR[TS] bits because the ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids hitting
		 * a TM Bad Thing at RFID.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

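	/*
	 * As in handle_rt_signal32(): prefer the vDSO sigreturn trampoline
	 * when it is mapped, else use the trampoline in the frame itself.
	 */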
	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi << 32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}