xref: /openbmc/linux/arch/powerpc/kernel/signal_32.c (revision 15e3ae36)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
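/* (i.e. copy only the prefix that the two register layouts share) */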
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to the brain-dead generic
 * implementation that makes things simple for little-endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force userspace to always see softe as 1 (interrupts enabled) */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (i == PT_SOFTE) {
			if (__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

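/*
 * Note that PT_MSR and PT_SOFTE are deliberately skipped below: the
 * callers restore MSR separately (only user-modifiable bits such as
 * MSR_LE are taken from the frame), and the soft-interrupt-enable
 * state is kernel-internal, so whatever userspace put there is ignored.
 */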
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 */
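/*
 * Rough stack picture at delivery time (informal; higher addresses on top):
 *
 *	old r1 (original stack pointer)
 *	int abigap[56]			\
 *	[TM copies of the two below]	 } struct sigframe
 *	struct mcontext mctx		 }
 *	struct sigcontext sctx		/
 *	gap of __SIGNAL_FRAMESIZE bytes
 *	new r1 -> back-chain word pointing at old r1
 */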
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
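
/*
 * Userspace view (informal sketch): a SA_SIGINFO handler installed with
 * sigaction() is entered as
 *
 *	void handler(int sig, siginfo_t *info, void *uc);
 *
 * where info points at rt_sf->info and uc at rt_sf->uc; r3, r4 and r5
 * are loaded accordingly in handle_rt_signal32() below.
 */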

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the MSR's top 32 bits in the tm frame so
	 * that we can check it on restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
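		/*
		 * PPC_INST_ADDI is the bare addi opcode, so adding sigret
		 * yields "addi r0,r0,sigret", which with RA=0 is
		 * "li r0,sigret"; PPC_INST_SC is a plain "sc".  The handler
		 * returns through LR into these two instructions, which
		 * issue the sigreturn syscall.
		 */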
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret,
			     unsigned long msr)
{
	WARN_ON(tm_suspend_disabled);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext, and one can also
	 * see what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * Restore the general registers, but not MSR or SOFTE. Also take
	 * care to keep r2 (the TLS pointer) intact if this is not a signal
	 * return.
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
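	/* (only the LE bit is taken from the saved MSR; the other saved
	 *  MSR bits merely select which register sets get restored below) */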

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next executes a VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * Restore the general registers, but not MSR or SOFTE. Also take
	 * care to keep r2 (the TLS pointer) intact if this is not a signal
	 * return.
	 * See the comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;
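	/* (MSR[TS] = 0b11 is the reserved encoding; 00, 01 and 10 mean
	 *  non-transactional, suspended and transactional respectively) */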

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame: put a Real Time Context onto the stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}
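	/* (the vDSO trampoline is preferred: it avoids writing instructions
	 *  to the stack and the icache flush that requires) */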

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
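	/* (writing the old r1 at *newsp below forms the ABI back-chain, so
	 *  unwinders can walk from the handler back to the interrupted
	 *  context) */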
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX).
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context that sets the MSR VSX bits but is too
	 * small to provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand.
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
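
/*
 * Userspace note (informal): on powerpc this syscall backs the glibc
 * getcontext()/setcontext()/swapcontext() family; old_ctx == NULL gives
 * setcontext() behaviour and new_ctx == NULL gives getcontext().
 */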

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended, since we can never be
	 * transactionally active in the kernel; if we somehow were, there
	 * would be nothing better to do than go ahead and hit a Bad Thing
	 * later anyway.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on a non-TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're in a
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
1381