xref: /openbmc/linux/arch/powerpc/kernel/signal_32.c (revision 4a3fad70)
1 /*
2  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3  *
4  *  PowerPC version
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  * Copyright (C) 2001 IBM
7  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9  *
10  *  Derived from "arch/i386/kernel/signal.c"
11  *    Copyright (C) 1991, 1992 Linus Torvalds
12  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
13  *
14  *  This program is free software; you can redistribute it and/or
15  *  modify it under the terms of the GNU General Public License
16  *  as published by the Free Software Foundation; either version
17  *  2 of the License, or (at your option) any later version.
18  */
19 
20 #include <linux/sched.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/elf.h>
27 #include <linux/ptrace.h>
28 #include <linux/ratelimit.h>
29 #ifdef CONFIG_PPC64
30 #include <linux/syscalls.h>
31 #include <linux/compat.h>
32 #else
33 #include <linux/wait.h>
34 #include <linux/unistd.h>
35 #include <linux/stddef.h>
36 #include <linux/tty.h>
37 #include <linux/binfmts.h>
38 #endif
39 
40 #include <linux/uaccess.h>
41 #include <asm/cacheflush.h>
42 #include <asm/syscalls.h>
43 #include <asm/sigcontext.h>
44 #include <asm/vdso.h>
45 #include <asm/switch_to.h>
46 #include <asm/tm.h>
47 #include <asm/asm-prototypes.h>
48 #ifdef CONFIG_PPC64
49 #include "ppc32.h"
50 #include <asm/unistd.h>
51 #else
52 #include <asm/ucontext.h>
53 #include <asm/pgtable.h>
54 #endif
55 
56 #include "signal.h"
57 
58 
59 #ifdef CONFIG_PPC64
60 #define sys_rt_sigreturn	compat_sys_rt_sigreturn
61 #define sys_swapcontext	compat_sys_swapcontext
62 #define sys_sigreturn	compat_sys_sigreturn
63 
64 #define old_sigaction	old_sigaction32
65 #define sigcontext	sigcontext32
66 #define mcontext	mcontext32
67 #define ucontext	ucontext32
68 
69 #define __save_altstack __compat_save_altstack
70 
71 /*
72  * Userspace code may pass a ucontext which doesn't include the VSX
73  * state added at the end.  We need to check for this case.
74  */
75 #define UCONTEXTSIZEWITHOUTVSX \
76 		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
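
/*
 * A minimal sketch of the intended use (the real check lives in
 * sys_swapcontext() below): a context smaller than this cannot even
 * hold the pre-VSX layout and must be rejected, e.g.
 *
 *	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
 *		return -EINVAL;
 */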
77 
78 /*
79  * Returning 0 means we return to userspace via
80  * ret_from_except and thus restore all user
81  * registers from *regs.  This is what we need
82  * to do when a signal has been delivered.
83  */
84 
85 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
86 #undef __SIGNAL_FRAMESIZE
87 #define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
88 #undef ELF_NVRREG
89 #define ELF_NVRREG	ELF_NVRREG32
90 
91 /*
92  * Functions for flipping sigsets (thanks to the brain-dead generic
93  * implementation that makes things simple for little-endian only)
94  */
95 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
96 {
97 	return put_compat_sigset(uset, set, sizeof(*uset));
98 }
99 
100 static inline int get_sigset_t(sigset_t *set,
101 			       const compat_sigset_t __user *uset)
102 {
103 	return get_compat_sigset(set, uset);
104 }
105 
106 #define to_user_ptr(p)		ptr_to_compat(p)
107 #define from_user_ptr(p)	compat_ptr(p)
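
/*
 * These are used below when publishing kernel pointers into 32-bit user
 * fields, e.g. in handle_rt_signal32():
 *
 *	__put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), &rt_sf->uc.uc_regs);
 */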
108 
109 static inline int save_general_regs(struct pt_regs *regs,
110 		struct mcontext __user *frame)
111 {
112 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
113 	int i;
114 
115 	WARN_ON(!FULL_REGS(regs));
116 
117 	for (i = 0; i <= PT_RESULT; i++) {
118 		if (i == 14 && !FULL_REGS(regs))
119 			i = 32;
120 		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
121 			return -EFAULT;
122 	}
123 	return 0;
124 }
125 
126 static inline int restore_general_regs(struct pt_regs *regs,
127 		struct mcontext __user *sr)
128 {
129 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
130 	int i;
131 
132 	for (i = 0; i <= PT_RESULT; i++) {
133 		if ((i == PT_MSR) || (i == PT_SOFTE))
134 			continue;
135 		if (__get_user(gregs[i], &sr->mc_gregs[i]))
136 			return -EFAULT;
137 	}
138 	return 0;
139 }
140 
141 #else /* CONFIG_PPC64 */
142 
143 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
144 
145 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
146 {
147 	return copy_to_user(uset, set, sizeof(*uset));
148 }
149 
150 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
151 {
152 	return copy_from_user(set, uset, sizeof(*uset));
153 }
154 
155 #define to_user_ptr(p)		((unsigned long)(p))
156 #define from_user_ptr(p)	((void __user *)(p))
157 
158 static inline int save_general_regs(struct pt_regs *regs,
159 		struct mcontext __user *frame)
160 {
161 	WARN_ON(!FULL_REGS(regs));
162 	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
163 }
164 
165 static inline int restore_general_regs(struct pt_regs *regs,
166 		struct mcontext __user *sr)
167 {
168 	/* copy up to but not including MSR */
169 	if (__copy_from_user(regs, &sr->mc_gregs,
170 				PT_MSR * sizeof(elf_greg_t)))
171 		return -EFAULT;
172 	/* copy from orig_r3 (the word after the MSR) up to the end */
173 	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
174 				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
175 		return -EFAULT;
176 	return 0;
177 }
178 #endif
179 
180 /*
181  * When we have signals to deliver, we set up on the
182  * user stack, going down from the original stack pointer:
183  *	an ABI gap of 56 words
184  *	an mcontext struct
185  *	a sigcontext struct
186  *	a gap of __SIGNAL_FRAMESIZE bytes
187  *
188  * Each of these things must be a multiple of 16 bytes in size. The following
189  * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
190  *
191  */
192 struct sigframe {
193 	struct sigcontext sctx;		/* the sigcontext */
194 	struct mcontext	mctx;		/* all the register values */
195 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
196 	struct sigcontext sctx_transact;
197 	struct mcontext	mctx_transact;
198 #endif
199 	/*
200 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
201 	 * regs and 18 fp regs below sp before decrementing it.
202 	 */
203 	int			abigap[56];
204 };
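
/*
 * Illustrative only (not part of the original file): the 16-byte size
 * requirement described above could be asserted at build time along
 * these lines, assuming BUILD_BUG_ON() from <linux/bug.h> is visible here.
 */
static inline void sigframe_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct sigframe) % 16 != 0);
	BUILD_BUG_ON(sizeof(struct mcontext) % 16 != 0);
}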
205 
206 /* We use the mc_pad field for the signal return trampoline. */
207 #define tramp	mc_pad
208 
209 /*
210  *  When we have rt signals to deliver, we set up on the
211  *  user stack, going down from the original stack pointer:
212  *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
213  *	a gap of __SIGNAL_FRAMESIZE+16 bytes
214  *  (the +16 is to get the siginfo and ucontext in the same
215  *  positions as in older kernels).
216  *
217  *  Each of these things must be a multiple of 16 bytes in size.
218  *
219  */
220 struct rt_sigframe {
221 #ifdef CONFIG_PPC64
222 	compat_siginfo_t info;
223 #else
224 	struct siginfo info;
225 #endif
226 	struct ucontext	uc;
227 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
228 	struct ucontext	uc_transact;
229 #endif
230 	/*
231 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
232 	 * regs and 18 fp regs below sp before decrementing it.
233 	 */
234 	int			abigap[56];
235 };
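
/*
 * A sketch of the resulting layout, going down from the original stack
 * pointer (high addresses first):
 *
 *	original sp
 *	struct rt_sigframe (info, uc, [uc_transact,] abigap)	<- rt_sf
 *	gap of __SIGNAL_FRAMESIZE + 16 bytes
 *	back chain word pointing at the old gpr[1]		<- new sp
 *
 * handle_rt_signal32() below computes this as
 * newsp = (unsigned long)rt_sf - (__SIGNAL_FRAMESIZE + 16).
 */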
236 
237 #ifdef CONFIG_VSX
238 unsigned long copy_fpr_to_user(void __user *to,
239 			       struct task_struct *task)
240 {
241 	u64 buf[ELF_NFPREG];
242 	int i;
243 
244 	/* copy the FPRs and FPSCR from the thread_struct to a local buffer, then write that to userspace */
245 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
246 		buf[i] = task->thread.TS_FPR(i);
247 	buf[i] = task->thread.fp_state.fpscr;
248 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
249 }
250 
251 unsigned long copy_fpr_from_user(struct task_struct *task,
252 				 void __user *from)
253 {
254 	u64 buf[ELF_NFPREG];
255 	int i;
256 
257 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
258 		return 1;
259 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
260 		task->thread.TS_FPR(i) = buf[i];
261 	task->thread.fp_state.fpscr = buf[i];
262 
263 	return 0;
264 }
265 
266 unsigned long copy_vsx_to_user(void __user *to,
267 			       struct task_struct *task)
268 {
269 	u64 buf[ELF_NVSRHALFREG];
270 	int i;
271 
272 	/* copy the VSR 0-31 upper halves from the thread_struct to a local buffer, then write that to userspace */
273 	for (i = 0; i < ELF_NVSRHALFREG; i++)
274 		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
275 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
276 }
277 
278 unsigned long copy_vsx_from_user(struct task_struct *task,
279 				 void __user *from)
280 {
281 	u64 buf[ELF_NVSRHALFREG];
282 	int i;
283 
284 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
285 		return 1;
286 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
287 		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
288 	return 0;
289 }
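
/*
 * Note on the FPR/VSX split above: each 128-bit VSR 0-31 aliases an FPR
 * in its first doubleword, which travels with the FP state via
 * copy_fpr_to_user()/copy_fpr_from_user(); only the other doubleword
 * (selected by TS_VSRLOWOFFSET) is copied here.
 */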
290 
291 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
292 unsigned long copy_ckfpr_to_user(void __user *to,
293 				  struct task_struct *task)
294 {
295 	u64 buf[ELF_NFPREG];
296 	int i;
297 
298 	/* copy the checkpointed FPRs and FPSCR from the thread_struct to a local buffer, then write that to userspace */
299 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
300 		buf[i] = task->thread.TS_CKFPR(i);
301 	buf[i] = task->thread.ckfp_state.fpscr;
302 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
303 }
304 
305 unsigned long copy_ckfpr_from_user(struct task_struct *task,
306 					  void __user *from)
307 {
308 	u64 buf[ELF_NFPREG];
309 	int i;
310 
311 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
312 		return 1;
313 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
314 		task->thread.TS_CKFPR(i) = buf[i];
315 	task->thread.ckfp_state.fpscr = buf[i];
316 
317 	return 0;
318 }
319 
320 unsigned long copy_ckvsx_to_user(void __user *to,
321 				  struct task_struct *task)
322 {
323 	u64 buf[ELF_NVSRHALFREG];
324 	int i;
325 
326 	/* copy the checkpointed VSR 0-31 upper halves from the thread_struct to a local buffer, then write that to userspace */
327 	for (i = 0; i < ELF_NVSRHALFREG; i++)
328 		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
329 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
330 }
331 
332 unsigned long copy_ckvsx_from_user(struct task_struct *task,
333 					  void __user *from)
334 {
335 	u64 buf[ELF_NVSRHALFREG];
336 	int i;
337 
338 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
339 		return 1;
340 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
341 		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
342 	return 0;
343 }
344 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
345 #else
346 inline unsigned long copy_fpr_to_user(void __user *to,
347 				      struct task_struct *task)
348 {
349 	return __copy_to_user(to, task->thread.fp_state.fpr,
350 			      ELF_NFPREG * sizeof(double));
351 }
352 
353 inline unsigned long copy_fpr_from_user(struct task_struct *task,
354 					void __user *from)
355 {
356 	return __copy_from_user(task->thread.fp_state.fpr, from,
357 			      ELF_NFPREG * sizeof(double));
358 }
359 
360 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
361 inline unsigned long copy_ckfpr_to_user(void __user *to,
362 					 struct task_struct *task)
363 {
364 	return __copy_to_user(to, task->thread.ckfp_state.fpr,
365 			      ELF_NFPREG * sizeof(double));
366 }
367 
368 inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
369 						 void __user *from)
370 {
371 	return __copy_from_user(task->thread.ckfp_state.fpr, from,
372 				ELF_NFPREG * sizeof(double));
373 }
374 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
375 #endif
376 
377 /*
378  * Save the current user registers on the user stack.
379  * We only save the altivec/spe registers if the process has used
380  * altivec/spe instructions at some point.
381  */
382 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
383 			  struct mcontext __user *tm_frame, int sigret,
384 			  int ctx_has_vsx_region)
385 {
386 	unsigned long msr = regs->msr;
387 
388 	/* Make sure floating point registers are stored in regs */
389 	flush_fp_to_thread(current);
390 
391 	/* save general registers */
392 	if (save_general_regs(regs, frame))
393 		return 1;
394 
395 #ifdef CONFIG_ALTIVEC
396 	/* save altivec registers */
397 	if (current->thread.used_vr) {
398 		flush_altivec_to_thread(current);
399 		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
400 				   ELF_NVRREG * sizeof(vector128)))
401 			return 1;
402 		/* set MSR_VEC in the saved MSR value to indicate that
403 		   frame->mc_vregs contains valid data */
404 		msr |= MSR_VEC;
405 	}
406 	/* else assert((regs->msr & MSR_VEC) == 0) */
407 
408 	/* We always copy to/from vrsave; it's 0 if we don't have or don't
409 	 * use altivec. Since VSCR only contains 32 bits saved in the least
410 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
411 	 * most significant bits of that same vector. --BenH
412 	 * Note that the current VRSAVE value is in the SPR at this point.
413 	 */
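	/*
	 * Layout note (following from the comment above): mc_vregs[0..31]
	 * hold VR0-VR31, mc_vregs[32] holds the VSCR, and VRSAVE is stuffed
	 * into the first word of that same 33rd vector.
	 */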
414 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
415 		current->thread.vrsave = mfspr(SPRN_VRSAVE);
416 	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
417 		return 1;
418 #endif /* CONFIG_ALTIVEC */
419 	if (copy_fpr_to_user(&frame->mc_fregs, current))
420 		return 1;
421 
422 	/*
423 	 * Clear the MSR VSX bit to indicate there is no valid state attached
424 	 * to this context, except in the specific case below where we set it.
425 	 */
426 	msr &= ~MSR_VSX;
427 #ifdef CONFIG_VSX
428 	/*
429 	 * Copy the VSR 0-31 upper halves from the thread_struct to a local
430 	 * buffer, then write that to userspace.  Also set MSR_VSX in
431 	 * the saved MSR value to indicate that frame->mc_vsregs
432 	 * contains valid data.
433 	 */
434 	if (current->thread.used_vsr && ctx_has_vsx_region) {
435 		flush_vsx_to_thread(current);
436 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
437 			return 1;
438 		msr |= MSR_VSX;
439 	}
440 #endif /* CONFIG_VSX */
441 #ifdef CONFIG_SPE
442 	/* save spe registers */
443 	if (current->thread.used_spe) {
444 		flush_spe_to_thread(current);
445 		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
446 				   ELF_NEVRREG * sizeof(u32)))
447 			return 1;
448 		/* set MSR_SPE in the saved MSR value to indicate that
449 		   frame->mc_vregs contains valid data */
450 		msr |= MSR_SPE;
451 	}
452 	/* else assert((regs->msr & MSR_SPE) == 0) */
453 
454 	/* We always copy to/from spefscr */
455 	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
456 		return 1;
457 #endif /* CONFIG_SPE */
458 
459 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
460 		return 1;
461 	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
462 	 * can check it on restore to see if TM is active
463 	 */
464 	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
465 		return 1;
466 
467 	if (sigret) {
468 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
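		/*
		 * Encoding note: 0x38000000 is "addi r0,r0,0", i.e. "li r0,0",
		 * so adding sigret gives "li r0,<sigreturn syscall #>";
		 * 0x44000002 is the "sc" instruction.
		 */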
469 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
470 		    || __put_user(0x44000002UL, &frame->tramp[1]))
471 			return 1;
472 		flush_icache_range((unsigned long) &frame->tramp[0],
473 				   (unsigned long) &frame->tramp[2]);
474 	}
475 
476 	return 0;
477 }
478 
479 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
480 /*
481  * Save the current user registers on the user stack.
482  * We only save the altivec/spe registers if the process has used
483  * altivec/spe instructions at some point.
484  * We also save the transactional registers to a second ucontext in the
485  * frame.
486  *
487  * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
488  */
489 static int save_tm_user_regs(struct pt_regs *regs,
490 			     struct mcontext __user *frame,
491 			     struct mcontext __user *tm_frame, int sigret)
492 {
493 	unsigned long msr = regs->msr;
494 
495 	WARN_ON(tm_suspend_disabled);
496 
497 	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
498 	 * just indicates to userland that we were doing a transaction, but we
499 	 * don't want to return in transactional state.  This also ensures
500 	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
501 	 */
502 	regs->msr &= ~MSR_TS_MASK;
503 
504 	/* Save both sets of general registers */
505 	if (save_general_regs(&current->thread.ckpt_regs, frame)
506 	    || save_general_regs(regs, tm_frame))
507 		return 1;
508 
509 	/* Stash the top half of the 64bit MSR into the 32bit MSR word
510 	 * of the transactional mcontext.  This way we have a backward-compatible
511 	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
512 	 * also look at what type of transaction (T or S) was active at the
513 	 * time of the signal.
514 	 */
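	/*
	 * The restore side reads this word back and shifts it up again;
	 * see restore_tm_user_regs() (msr_hi <<= 32) and the
	 * MSR_TM_ACTIVE(msr_hi << 32) checks in the sigreturn paths below.
	 */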
515 	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
516 		return 1;
517 
518 #ifdef CONFIG_ALTIVEC
519 	/* save altivec registers */
520 	if (current->thread.used_vr) {
521 		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
522 				   ELF_NVRREG * sizeof(vector128)))
523 			return 1;
524 		if (msr & MSR_VEC) {
525 			if (__copy_to_user(&tm_frame->mc_vregs,
526 					   &current->thread.vr_state,
527 					   ELF_NVRREG * sizeof(vector128)))
528 				return 1;
529 		} else {
530 			if (__copy_to_user(&tm_frame->mc_vregs,
531 					   &current->thread.ckvr_state,
532 					   ELF_NVRREG * sizeof(vector128)))
533 				return 1;
534 		}
535 
536 		/* set MSR_VEC in the saved MSR value to indicate that
537 		 * frame->mc_vregs contains valid data
538 		 */
539 		msr |= MSR_VEC;
540 	}
541 
542 	/* We always copy to/from vrsave; it's 0 if we don't have or don't
543 	 * use altivec. Since VSCR only contains 32 bits saved in the least
544 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
545 	 * most significant bits of that same vector. --BenH
546 	 */
547 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
548 		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
549 	if (__put_user(current->thread.ckvrsave,
550 		       (u32 __user *)&frame->mc_vregs[32]))
551 		return 1;
552 	if (msr & MSR_VEC) {
553 		if (__put_user(current->thread.vrsave,
554 			       (u32 __user *)&tm_frame->mc_vregs[32]))
555 			return 1;
556 	} else {
557 		if (__put_user(current->thread.ckvrsave,
558 			       (u32 __user *)&tm_frame->mc_vregs[32]))
559 			return 1;
560 	}
561 #endif /* CONFIG_ALTIVEC */
562 
563 	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
564 		return 1;
565 	if (msr & MSR_FP) {
566 		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
567 			return 1;
568 	} else {
569 		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
570 			return 1;
571 	}
572 
573 #ifdef CONFIG_VSX
574 	/*
575 	 * Copy the VSR 0-31 upper halves from the thread_struct to a local
576 	 * buffer, then write that to userspace.  Also set MSR_VSX in
577 	 * the saved MSR value to indicate that frame->mc_vsregs
578 	 * contains valid data.
579 	 */
580 	if (current->thread.used_vsr) {
581 		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
582 			return 1;
583 		if (msr & MSR_VSX) {
584 			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
585 						      current))
586 				return 1;
587 		} else {
588 			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
589 				return 1;
590 		}
591 
592 		msr |= MSR_VSX;
593 	}
594 #endif /* CONFIG_VSX */
595 #ifdef CONFIG_SPE
596 	/* SPE regs are not checkpointed with TM, so this section is
597 	 * simply the same as in save_user_regs().
598 	 */
599 	if (current->thread.used_spe) {
600 		flush_spe_to_thread(current);
601 		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
602 				   ELF_NEVRREG * sizeof(u32)))
603 			return 1;
604 		/* set MSR_SPE in the saved MSR value to indicate that
605 		 * frame->mc_vregs contains valid data */
606 		msr |= MSR_SPE;
607 	}
608 
609 	/* We always copy to/from spefscr */
610 	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
611 		return 1;
612 #endif /* CONFIG_SPE */
613 
614 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
615 		return 1;
616 	if (sigret) {
617 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
618 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
619 		    || __put_user(0x44000002UL, &frame->tramp[1]))
620 			return 1;
621 		flush_icache_range((unsigned long) &frame->tramp[0],
622 				   (unsigned long) &frame->tramp[2]);
623 	}
624 
625 	return 0;
626 }
627 #endif
628 
629 /*
630  * Restore the current user register values from the user stack,
631  * (except for MSR).
632  */
633 static long restore_user_regs(struct pt_regs *regs,
634 			      struct mcontext __user *sr, int sig)
635 {
636 	long err;
637 	unsigned int save_r2 = 0;
638 	unsigned long msr;
639 #ifdef CONFIG_VSX
640 	int i;
641 #endif
642 
643 	/*
644 	 * Restore the general registers, excluding MSR and SOFTE. Also
645 	 * take care to keep r2 (TLS) intact if this is not a signal return.
646 	 */
647 	if (!sig)
648 		save_r2 = (unsigned int)regs->gpr[2];
649 	err = restore_general_regs(regs, sr);
650 	regs->trap = 0;
651 	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
652 	if (!sig)
653 		regs->gpr[2] = (unsigned long) save_r2;
654 	if (err)
655 		return 1;
656 
657 	/* if doing signal return, restore the previous little-endian mode */
658 	if (sig)
659 		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
660 
661 #ifdef CONFIG_ALTIVEC
662 	/*
663 	 * Force the process to reload the altivec registers from
664 	 * current->thread when it next does altivec instructions
665 	 */
666 	regs->msr &= ~MSR_VEC;
667 	if (msr & MSR_VEC) {
668 		/* restore altivec registers from the stack */
669 		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
670 				     sizeof(sr->mc_vregs)))
671 			return 1;
672 		current->thread.used_vr = true;
673 	} else if (current->thread.used_vr)
674 		memset(&current->thread.vr_state, 0,
675 		       ELF_NVRREG * sizeof(vector128));
676 
677 	/* Always get VRSAVE back */
678 	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
679 		return 1;
680 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
681 		mtspr(SPRN_VRSAVE, current->thread.vrsave);
682 #endif /* CONFIG_ALTIVEC */
683 	if (copy_fpr_from_user(current, &sr->mc_fregs))
684 		return 1;
685 
686 #ifdef CONFIG_VSX
687 	/*
688 	 * Force the process to reload the VSX registers from
689 	 * current->thread when it next executes a VSX instruction.
690 	 */
691 	regs->msr &= ~MSR_VSX;
692 	if (msr & MSR_VSX) {
693 		/*
694 		 * Restore the VSX registers from the stack to a local
695 		 * buffer, then write that out to the thread_struct.
696 		 */
697 		if (copy_vsx_from_user(current, &sr->mc_vsregs))
698 			return 1;
699 		current->thread.used_vsr = true;
700 	} else if (current->thread.used_vsr)
701 		for (i = 0; i < 32 ; i++)
702 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
703 #endif /* CONFIG_VSX */
704 	/*
705 	 * force the process to reload the FP registers from
706 	 * current->thread when it next does FP instructions
707 	 */
708 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
709 
710 #ifdef CONFIG_SPE
711 	/* force the process to reload the spe registers from
712 	   current->thread when it next does spe instructions */
713 	regs->msr &= ~MSR_SPE;
714 	if (msr & MSR_SPE) {
715 		/* restore spe registers from the stack */
716 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
717 				     ELF_NEVRREG * sizeof(u32)))
718 			return 1;
719 		current->thread.used_spe = true;
720 	} else if (current->thread.used_spe)
721 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
722 
723 	/* Always get SPEFSCR back */
724 	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
725 		return 1;
726 #endif /* CONFIG_SPE */
727 
728 	return 0;
729 }
730 
731 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
732 /*
733  * Restore the current user register values from the user stack, except for
734  * MSR, and recheckpoint the original checkpointed register state for processes
735  * in transactions.
736  */
737 static long restore_tm_user_regs(struct pt_regs *regs,
738 				 struct mcontext __user *sr,
739 				 struct mcontext __user *tm_sr)
740 {
741 	long err;
742 	unsigned long msr, msr_hi;
743 #ifdef CONFIG_VSX
744 	int i;
745 #endif
746 
747 	if (tm_suspend_disabled)
748 		return 1;
749 	/*
750 	 * Restore the general registers, excluding MSR and SOFTE. Also
751 	 * take care to keep r2 (TLS) intact if this is not a signal return.
752 	 * See comment in signal_64.c:restore_tm_sigcontexts();
753 	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
754 	 * were set by the signal delivery.
755 	 */
756 	err = restore_general_regs(regs, tm_sr);
757 	err |= restore_general_regs(&current->thread.ckpt_regs, sr);
758 
759 	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
760 
761 	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
762 	if (err)
763 		return 1;
764 
765 	/* Restore the previous little-endian mode */
766 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
767 
768 #ifdef CONFIG_ALTIVEC
769 	regs->msr &= ~MSR_VEC;
770 	if (msr & MSR_VEC) {
771 		/* restore altivec registers from the stack */
772 		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
773 				     sizeof(sr->mc_vregs)) ||
774 		    __copy_from_user(&current->thread.vr_state,
775 				     &tm_sr->mc_vregs,
776 				     sizeof(sr->mc_vregs)))
777 			return 1;
778 		current->thread.used_vr = true;
779 	} else if (current->thread.used_vr) {
780 		memset(&current->thread.vr_state, 0,
781 		       ELF_NVRREG * sizeof(vector128));
782 		memset(&current->thread.ckvr_state, 0,
783 		       ELF_NVRREG * sizeof(vector128));
784 	}
785 
786 	/* Always get VRSAVE back */
787 	if (__get_user(current->thread.ckvrsave,
788 		       (u32 __user *)&sr->mc_vregs[32]) ||
789 	    __get_user(current->thread.vrsave,
790 		       (u32 __user *)&tm_sr->mc_vregs[32]))
791 		return 1;
792 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
793 		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
794 #endif /* CONFIG_ALTIVEC */
795 
796 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
797 
798 	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
799 	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
800 		return 1;
801 
802 #ifdef CONFIG_VSX
803 	regs->msr &= ~MSR_VSX;
804 	if (msr & MSR_VSX) {
805 		/*
806 		 * Restore the VSX registers from the stack to a local
807 		 * buffer, then write that out to the thread_struct.
808 		 */
809 		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
810 		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
811 			return 1;
812 		current->thread.used_vsr = true;
813 	} else if (current->thread.used_vsr)
814 		for (i = 0; i < 32 ; i++) {
815 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
816 			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
817 		}
818 #endif /* CONFIG_VSX */
819 
820 #ifdef CONFIG_SPE
821 	/* SPE regs are not checkpointed with TM, so this section is
822 	 * simply the same as in restore_user_regs().
823 	 */
824 	regs->msr &= ~MSR_SPE;
825 	if (msr & MSR_SPE) {
826 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
827 				     ELF_NEVRREG * sizeof(u32)))
828 			return 1;
829 		current->thread.used_spe = true;
830 	} else if (current->thread.used_spe)
831 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
832 
833 	/* Always get SPEFSCR back */
834 	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
835 		       + ELF_NEVRREG))
836 		return 1;
837 #endif /* CONFIG_SPE */
838 
839 	/* Get the top half of the MSR from the user context */
840 	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
841 		return 1;
842 	msr_hi <<= 32;
843 	/* If TM bits are set to the reserved value, it's an invalid context */
844 	if (MSR_TM_RESV(msr_hi))
845 		return 1;
846 	/* Pull in the MSR TM bits from the user context */
847 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
848 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
849 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
850 	 * transactional versions should be loaded.
851 	 */
852 	tm_enable();
853 	/* Make sure the transaction is marked as failed */
854 	current->thread.tm_texasr |= TEXASR_FS;
855 	/* This loads the checkpointed FP/VEC state, if used */
856 	tm_recheckpoint(&current->thread);
857 
858 	/* This loads the speculative FP/VEC state, if used */
859 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
860 	if (msr & MSR_FP) {
861 		load_fp_state(&current->thread.fp_state);
862 		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
863 	}
864 #ifdef CONFIG_ALTIVEC
865 	if (msr & MSR_VEC) {
866 		load_vr_state(&current->thread.vr_state);
867 		regs->msr |= MSR_VEC;
868 	}
869 #endif
870 
871 	return 0;
872 }
873 #endif
874 
875 #ifdef CONFIG_PPC64
876 int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
877 {
878 	int err;
879 
880 	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
881 		return -EFAULT;
882 
883 	/* If you change the siginfo_t structure, be sure this code is
884 	 * fixed accordingly.
885 	 * It must never copy any padding contained in the structure
886 	 * (to avoid security leaks), but must copy the generic
887 	 * 3 ints plus the relevant union member.
888 	 * This routine must also convert the siginfo from 64bit to
889 	 * 32bit at the same time.
890 	 */
891 	err = __put_user(s->si_signo, &d->si_signo);
892 	err |= __put_user(s->si_errno, &d->si_errno);
893 	err |= __put_user(s->si_code, &d->si_code);
894 	if (s->si_code < 0)
895 		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
896 				      SI_PAD_SIZE32);
897 	else switch (siginfo_layout(s->si_signo, s->si_code)) {
898 	case SIL_CHLD:
899 		err |= __put_user(s->si_pid, &d->si_pid);
900 		err |= __put_user(s->si_uid, &d->si_uid);
901 		err |= __put_user(s->si_utime, &d->si_utime);
902 		err |= __put_user(s->si_stime, &d->si_stime);
903 		err |= __put_user(s->si_status, &d->si_status);
904 		break;
905 	case SIL_FAULT:
906 		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
907 				  &d->si_addr);
908 		break;
909 	case SIL_POLL:
910 		err |= __put_user(s->si_band, &d->si_band);
911 		err |= __put_user(s->si_fd, &d->si_fd);
912 		break;
913 	case SIL_TIMER:
914 		err |= __put_user(s->si_tid, &d->si_tid);
915 		err |= __put_user(s->si_overrun, &d->si_overrun);
916 		err |= __put_user(s->si_int, &d->si_int);
917 		break;
918 	case SIL_SYS:
919 		err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
920 		err |= __put_user(s->si_syscall, &d->si_syscall);
921 		err |= __put_user(s->si_arch, &d->si_arch);
922 		break;
923 	case SIL_RT:
924 		err |= __put_user(s->si_int, &d->si_int);
925 		/* fallthrough */
926 	case SIL_KILL:
927 		err |= __put_user(s->si_pid, &d->si_pid);
928 		err |= __put_user(s->si_uid, &d->si_uid);
929 		break;
930 	}
931 	return err;
932 }
933 
934 #define copy_siginfo_to_user	copy_siginfo_to_user32
935 
936 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
937 {
938 	if (copy_from_user(to, from, 3*sizeof(int)) ||
939 	    copy_from_user(to->_sifields._pad,
940 			   from->_sifields._pad, SI_PAD_SIZE32))
941 		return -EFAULT;
942 
943 	return 0;
944 }
945 #endif /* CONFIG_PPC64 */
946 
947 /*
948  * Set up a signal frame for a "real-time" signal handler
949  * (one which gets siginfo).
950  */
951 int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
952 		       struct task_struct *tsk)
953 {
954 	struct rt_sigframe __user *rt_sf;
955 	struct mcontext __user *frame;
956 	struct mcontext __user *tm_frame = NULL;
957 	void __user *addr;
958 	unsigned long newsp = 0;
959 	int sigret;
960 	unsigned long tramp;
961 	struct pt_regs *regs = tsk->thread.regs;
962 
963 	BUG_ON(tsk != current);
964 
965 	/* Set up Signal Frame */
966 	/* Put a Real Time Context onto stack */
967 	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
968 	addr = rt_sf;
969 	if (unlikely(rt_sf == NULL))
970 		goto badframe;
971 
972 	/* Put the siginfo & fill in most of the ucontext */
973 	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
974 	    || __put_user(0, &rt_sf->uc.uc_flags)
975 	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
976 	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
977 		    &rt_sf->uc.uc_regs)
978 	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
979 		goto badframe;
980 
981 	/* Save user registers on the stack */
982 	frame = &rt_sf->uc.uc_mcontext;
983 	addr = frame;
984 	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
985 		sigret = 0;
986 		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
987 	} else {
988 		sigret = __NR_rt_sigreturn;
989 		tramp = (unsigned long) frame->tramp;
990 	}
991 
992 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
993 	tm_frame = &rt_sf->uc_transact.uc_mcontext;
994 	if (MSR_TM_ACTIVE(regs->msr)) {
995 		if (__put_user((unsigned long)&rt_sf->uc_transact,
996 			       &rt_sf->uc.uc_link) ||
997 		    __put_user((unsigned long)tm_frame,
998 			       &rt_sf->uc_transact.uc_regs))
999 			goto badframe;
1000 		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
1001 			goto badframe;
1002 	}
1003 	else
1004 #endif
1005 	{
1006 		if (__put_user(0, &rt_sf->uc.uc_link))
1007 			goto badframe;
1008 		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
1009 			goto badframe;
1010 	}
1011 	regs->link = tramp;
1012 
1013 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
1014 
1015 	/* create a stack frame for the caller of the handler */
1016 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
1017 	addr = (void __user *)regs->gpr[1];
1018 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
1019 		goto badframe;
1020 
1021 	/* Fill registers for signal handler */
1022 	regs->gpr[1] = newsp;
1023 	regs->gpr[3] = ksig->sig;
1024 	regs->gpr[4] = (unsigned long) &rt_sf->info;
1025 	regs->gpr[5] = (unsigned long) &rt_sf->uc;
1026 	regs->gpr[6] = (unsigned long) rt_sf;
1027 	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
1028 	/* enter the signal handler in native-endian mode */
1029 	regs->msr &= ~MSR_LE;
1030 	regs->msr |= (MSR_KERNEL & MSR_LE);
1031 	return 0;
1032 
1033 badframe:
1034 	if (show_unhandled_signals)
1035 		printk_ratelimited(KERN_INFO
1036 				   "%s[%d]: bad frame in handle_rt_signal32: "
1037 				   "%p nip %08lx lr %08lx\n",
1038 				   tsk->comm, tsk->pid,
1039 				   addr, regs->nip, regs->link);
1040 
1041 	return 1;
1042 }
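
/*
 * Net effect of the register setup above: the handler is entered as if
 * called as handler(sig, &rt_sf->info, &rt_sf->uc), per gpr[3..5], with
 * the link register pointing at the sigreturn trampoline.
 */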
1043 
1044 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
1045 {
1046 	sigset_t set;
1047 	struct mcontext __user *mcp;
1048 
1049 	if (get_sigset_t(&set, &ucp->uc_sigmask))
1050 		return -EFAULT;
1051 #ifdef CONFIG_PPC64
1052 	{
1053 		u32 cmcp;
1054 
1055 		if (__get_user(cmcp, &ucp->uc_regs))
1056 			return -EFAULT;
1057 		mcp = (struct mcontext __user *)(u64)cmcp;
1058 		/* no need to check access_ok(mcp), since mcp < 4GB */
1059 	}
1060 #else
1061 	if (__get_user(mcp, &ucp->uc_regs))
1062 		return -EFAULT;
1063 	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
1064 		return -EFAULT;
1065 #endif
1066 	set_current_blocked(&set);
1067 	if (restore_user_regs(regs, mcp, sig))
1068 		return -EFAULT;
1069 
1070 	return 0;
1071 }
1072 
1073 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1074 static int do_setcontext_tm(struct ucontext __user *ucp,
1075 			    struct ucontext __user *tm_ucp,
1076 			    struct pt_regs *regs)
1077 {
1078 	sigset_t set;
1079 	struct mcontext __user *mcp;
1080 	struct mcontext __user *tm_mcp;
1081 	u32 cmcp;
1082 	u32 tm_cmcp;
1083 
1084 	if (get_sigset_t(&set, &ucp->uc_sigmask))
1085 		return -EFAULT;
1086 
1087 	if (__get_user(cmcp, &ucp->uc_regs) ||
1088 	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
1089 		return -EFAULT;
1090 	mcp = (struct mcontext __user *)(u64)cmcp;
1091 	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1092 	/* no need to check access_ok(mcp), since mcp < 4GB */
1093 
1094 	set_current_blocked(&set);
1095 	if (restore_tm_user_regs(regs, mcp, tm_mcp))
1096 		return -EFAULT;
1097 
1098 	return 0;
1099 }
1100 #endif
1101 
1102 long sys_swapcontext(struct ucontext __user *old_ctx,
1103 		     struct ucontext __user *new_ctx,
1104 		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
1105 {
1106 	unsigned char tmp;
1107 	int ctx_has_vsx_region = 0;
1108 
1109 #ifdef CONFIG_PPC64
1110 	unsigned long new_msr = 0;
1111 
1112 	if (new_ctx) {
1113 		struct mcontext __user *mcp;
1114 		u32 cmcp;
1115 
1116 		/*
1117 		 * Get pointer to the real mcontext.  No need for
1118 		 * access_ok since we are dealing with compat
1119 		 * pointers.
1120 		 */
1121 		if (__get_user(cmcp, &new_ctx->uc_regs))
1122 			return -EFAULT;
1123 		mcp = (struct mcontext __user *)(u64)cmcp;
1124 		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1125 			return -EFAULT;
1126 	}
1127 	/*
1128 	 * Check that the context is not smaller than the original
1129 	 * size (with VMX but without VSX)
1130 	 */
1131 	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1132 		return -EINVAL;
1133 	/*
1134 	 * Reject the new context if it sets the MSR VSX bits but
1135 	 * doesn't provide enough room for the VSX state.
1136 	 */
1137 	if ((ctx_size < sizeof(struct ucontext)) &&
1138 	    (new_msr & MSR_VSX))
1139 		return -EINVAL;
1140 	/* Does the context have enough room to store VSX data? */
1141 	if (ctx_size >= sizeof(struct ucontext))
1142 		ctx_has_vsx_region = 1;
1143 #else
1144 	/* Context size is for future use. Right now, we only make sure
1145 	 * we are passed something we understand
1146 	 */
1147 	if (ctx_size < sizeof(struct ucontext))
1148 		return -EINVAL;
1149 #endif
1150 	if (old_ctx != NULL) {
1151 		struct mcontext __user *mctx;
1152 
1153 		/*
1154 		 * old_ctx might not be 16-byte aligned, in which
1155 		 * case old_ctx->uc_mcontext won't be either.
1156 		 * Because we have the old_ctx->uc_pad2 field
1157 		 * before old_ctx->uc_mcontext, we need to round down
1158 		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1159 		 */
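		/*
		 * e.g. with &old_ctx->uc_mcontext at 0xbfff0f38 (an
		 * illustrative address), masking with ~0xfUL yields
		 * 0xbfff0f30.
		 */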
1160 		mctx = (struct mcontext __user *)
1161 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1162 		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1163 		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1164 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1165 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1166 			return -EFAULT;
1167 	}
1168 	if (new_ctx == NULL)
1169 		return 0;
1170 	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1171 	    || __get_user(tmp, (u8 __user *) new_ctx)
1172 	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1173 		return -EFAULT;
1174 
1175 	/*
1176 	 * If we get a fault copying the context into the kernel's
1177 	 * image of the user's registers, we can't just return -EFAULT
1178 	 * because the user's registers will be corrupted.  For instance
1179 	 * the NIP value may have been updated but not some of the
1180 	 * other registers.  Given that we have done the access_ok
1181 	 * and successfully read the first and last bytes of the region
1182 	 * above, this should only happen in an out-of-memory situation
1183 	 * or if another thread unmaps the region containing the context.
1184 	 * We kill the task with a SIGSEGV in this situation.
1185 	 */
1186 	if (do_setcontext(new_ctx, regs, 0))
1187 		do_exit(SIGSEGV);
1188 
1189 	set_thread_flag(TIF_RESTOREALL);
1190 	return 0;
1191 }
1192 
1193 long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1194 		     struct pt_regs *regs)
1195 {
1196 	struct rt_sigframe __user *rt_sf;
1197 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1198 	struct ucontext __user *uc_transact;
1199 	unsigned long msr_hi;
1200 	unsigned long tmp;
1201 	int tm_restore = 0;
1202 #endif
1203 	/* Always make any pending restarted system calls return -EINTR */
1204 	current->restart_block.fn = do_no_restart_syscall;
1205 
1206 	rt_sf = (struct rt_sigframe __user *)
1207 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
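	/*
	 * This mirrors handle_rt_signal32() above, which set
	 * newsp = (unsigned long)rt_sf - (__SIGNAL_FRAMESIZE + 16);
	 * adding the gap back onto the saved gpr[1] recovers rt_sf.
	 */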
1208 	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1209 		goto bad;
1210 
1211 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1212 	/*
1213 	 * If there is a transactional state then throw it away.
1214 	 * The purpose of a sigreturn is to destroy all traces of the
1215 	 * signal frame, and this includes any transactional state created
1216 	 * within it. We only check for suspended, as we can never be
1217 	 * transactionally active in the kernel; if we somehow are, there is
1218 	 * nothing better to do than go ahead and hit the Bad Thing later.
1219 	 * The cause is not important as there will never be a
1220 	 * recheckpoint so it's not user visible.
1221 	 */
1222 	if (MSR_TM_SUSPENDED(mfmsr()))
1223 		tm_reclaim_current(0);
1224 
1225 	if (__get_user(tmp, &rt_sf->uc.uc_link))
1226 		goto bad;
1227 	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1228 	if (uc_transact) {
1229 		u32 cmcp;
1230 		struct mcontext __user *mcp;
1231 
1232 		if (__get_user(cmcp, &uc_transact->uc_regs))
1233 			return -EFAULT;
1234 		mcp = (struct mcontext __user *)(u64)cmcp;
1235 		/* The top 32 bits of the MSR are stashed in the transactional
1236 		 * ucontext. */
1237 		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1238 			goto bad;
1239 
1240 		if (MSR_TM_ACTIVE(msr_hi<<32)) {
1241 			/* We only recheckpoint on return if we're
1242 			/* We only recheckpoint on return if we're in a
1243 			 * transaction.
1244 			tm_restore = 1;
1245 			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1246 				goto bad;
1247 		}
1248 	}
1249 	if (!tm_restore)
1250 		/* Fall through, for non-TM restore */
1251 #endif
1252 	if (do_setcontext(&rt_sf->uc, regs, 1))
1253 		goto bad;
1254 
1255 	/*
1256 	 * It's not clear whether or why it is desirable to save the
1257 	 * sigaltstack setting on signal delivery and restore it on
1258 	 * signal return.  But other architectures do this and we have
1259 	 * always done it up until now so it is probably better not to
1260 	 * change it.  -- paulus
1261 	 */
1262 #ifdef CONFIG_PPC64
1263 	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1264 		goto bad;
1265 #else
1266 	if (restore_altstack(&rt_sf->uc.uc_stack))
1267 		goto bad;
1268 #endif
1269 	set_thread_flag(TIF_RESTOREALL);
1270 	return 0;
1271 
1272  bad:
1273 	if (show_unhandled_signals)
1274 		printk_ratelimited(KERN_INFO
1275 				   "%s[%d]: bad frame in sys_rt_sigreturn: "
1276 				   "%p nip %08lx lr %08lx\n",
1277 				   current->comm, current->pid,
1278 				   rt_sf, regs->nip, regs->link);
1279 
1280 	force_sig(SIGSEGV, current);
1281 	return 0;
1282 }
1283 
1284 #ifdef CONFIG_PPC32
1285 int sys_debug_setcontext(struct ucontext __user *ctx,
1286 			 int ndbg, struct sig_dbg_op __user *dbg,
1287 			 int r6, int r7, int r8,
1288 			 struct pt_regs *regs)
1289 {
1290 	struct sig_dbg_op op;
1291 	int i;
1292 	unsigned char tmp;
1293 	unsigned long new_msr = regs->msr;
1294 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1295 	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1296 #endif
1297 
1298 	for (i = 0; i < ndbg; i++) {
1299 		if (copy_from_user(&op, dbg + i, sizeof(op)))
1300 			return -EFAULT;
1301 		switch (op.dbg_type) {
1302 		case SIG_DBG_SINGLE_STEPPING:
1303 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1304 			if (op.dbg_value) {
1305 				new_msr |= MSR_DE;
1306 				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1307 			} else {
1308 				new_dbcr0 &= ~DBCR0_IC;
1309 				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1310 						current->thread.debug.dbcr1)) {
1311 					new_msr &= ~MSR_DE;
1312 					new_dbcr0 &= ~DBCR0_IDM;
1313 				}
1314 			}
1315 #else
1316 			if (op.dbg_value)
1317 				new_msr |= MSR_SE;
1318 			else
1319 				new_msr &= ~MSR_SE;
1320 #endif
1321 			break;
1322 		case SIG_DBG_BRANCH_TRACING:
1323 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1324 			return -EINVAL;
1325 #else
1326 			if (op.dbg_value)
1327 				new_msr |= MSR_BE;
1328 			else
1329 				new_msr &= ~MSR_BE;
1330 #endif
1331 			break;
1332 
1333 		default:
1334 			return -EINVAL;
1335 		}
1336 	}
1337 
1338 	/* We wait until here to actually install the values in the
1339 	   registers so that if we fail in the above loop, it will not
1340 	   affect the contents of these registers.  After this point,
1341 	   failure is a problem anyway, and it's very unlikely unless
1342 	   the user is really doing something wrong. */
1343 	regs->msr = new_msr;
1344 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1345 	current->thread.debug.dbcr0 = new_dbcr0;
1346 #endif
1347 
1348 	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1349 	    || __get_user(tmp, (u8 __user *) ctx)
1350 	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1351 		return -EFAULT;
1352 
1353 	/*
1354 	 * If we get a fault copying the context into the kernel's
1355 	 * image of the user's registers, we can't just return -EFAULT
1356 	 * because the user's registers will be corrupted.  For instance
1357 	 * the NIP value may have been updated but not some of the
1358 	 * other registers.  Given that we have done the access_ok
1359 	 * and successfully read the first and last bytes of the region
1360 	 * above, this should only happen in an out-of-memory situation
1361 	 * or if another thread unmaps the region containing the context.
1362 	 * We kill the task with a SIGSEGV in this situation.
1363 	 */
1364 	if (do_setcontext(ctx, regs, 1)) {
1365 		if (show_unhandled_signals)
1366 			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1367 					   "sys_debug_setcontext: %p nip %08lx "
1368 					   "lr %08lx\n",
1369 					   current->comm, current->pid,
1370 					   ctx, regs->nip, regs->link);
1371 
1372 		force_sig(SIGSEGV, current);
1373 		goto out;
1374 	}
1375 
1376 	/*
1377 	 * It's not clear whether or why it is desirable to save the
1378 	 * sigaltstack setting on signal delivery and restore it on
1379 	 * signal return.  But other architectures do this and we have
1380 	 * always done it up until now so it is probably better not to
1381 	 * change it.  -- paulus
1382 	 */
1383 	restore_altstack(&ctx->uc_stack);
1384 
1385 	set_thread_flag(TIF_RESTOREALL);
1386  out:
1387 	return 0;
1388 }
1389 #endif
1390 
1391 /*
1392  * OK, we're invoking a handler
1393  */
1394 int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1395 		struct task_struct *tsk)
1396 {
1397 	struct sigcontext __user *sc;
1398 	struct sigframe __user *frame;
1399 	struct mcontext __user *tm_mctx = NULL;
1400 	unsigned long newsp = 0;
1401 	int sigret;
1402 	unsigned long tramp;
1403 	struct pt_regs *regs = tsk->thread.regs;
1404 
1405 	BUG_ON(tsk != current);
1406 
1407 	/* Set up Signal Frame */
1408 	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1409 	if (unlikely(frame == NULL))
1410 		goto badframe;
1411 	sc = (struct sigcontext __user *) &frame->sctx;
1412 
1413 #if _NSIG != 64
1414 #error "Please adjust handle_signal()"
1415 #endif
1416 	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1417 	    || __put_user(oldset->sig[0], &sc->oldmask)
1418 #ifdef CONFIG_PPC64
1419 	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1420 #else
1421 	    || __put_user(oldset->sig[1], &sc->_unused[3])
1422 #endif
1423 	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1424 	    || __put_user(ksig->sig, &sc->signal))
1425 		goto badframe;
1426 
1427 	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1428 		sigret = 0;
1429 		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1430 	} else {
1431 		sigret = __NR_sigreturn;
1432 		tramp = (unsigned long) frame->mctx.tramp;
1433 	}
1434 
1435 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1436 	tm_mctx = &frame->mctx_transact;
1437 	if (MSR_TM_ACTIVE(regs->msr)) {
1438 		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1439 				      sigret))
1440 			goto badframe;
1441 	}
1442 	else
1443 #endif
1444 	{
1445 		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1446 			goto badframe;
1447 	}
1448 
1449 	regs->link = tramp;
1450 
1451 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
1452 
1453 	/* create a stack frame for the caller of the handler */
1454 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1455 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
1456 		goto badframe;
1457 
1458 	regs->gpr[1] = newsp;
1459 	regs->gpr[3] = ksig->sig;
1460 	regs->gpr[4] = (unsigned long) sc;
1461 	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
1462 	/* enter the signal handler in big-endian mode */
1463 	regs->msr &= ~MSR_LE;
1464 	return 0;
1465 
1466 badframe:
1467 	if (show_unhandled_signals)
1468 		printk_ratelimited(KERN_INFO
1469 				   "%s[%d]: bad frame in handle_signal32: "
1470 				   "%p nip %08lx lr %08lx\n",
1471 				   tsk->comm, tsk->pid,
1472 				   frame, regs->nip, regs->link);
1473 
1474 	return 1;
1475 }
1476 
1477 /*
1478  * Do a signal return; undo the signal stack.
1479  */
1480 long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1481 		       struct pt_regs *regs)
1482 {
1483 	struct sigframe __user *sf;
1484 	struct sigcontext __user *sc;
1485 	struct sigcontext sigctx;
1486 	struct mcontext __user *sr;
1487 	void __user *addr;
1488 	sigset_t set;
1489 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1490 	struct mcontext __user *mcp, *tm_mcp;
1491 	unsigned long msr_hi;
1492 #endif
1493 
1494 	/* Always make any pending restarted system calls return -EINTR */
1495 	current->restart_block.fn = do_no_restart_syscall;
1496 
1497 	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1498 	sc = &sf->sctx;
1499 	addr = sc;
1500 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1501 		goto badframe;
1502 
1503 #ifdef CONFIG_PPC64
1504 	/*
1505 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1506 	 * unused part of the signal stackframe
1507 	 */
1508 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1509 #else
1510 	set.sig[0] = sigctx.oldmask;
1511 	set.sig[1] = sigctx._unused[3];
1512 #endif
1513 	set_current_blocked(&set);
1514 
1515 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1516 	mcp = (struct mcontext __user *)&sf->mctx;
1517 	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1518 	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1519 		goto badframe;
1520 	if (MSR_TM_ACTIVE(msr_hi<<32)) {
1521 		if (!cpu_has_feature(CPU_FTR_TM))
1522 			goto badframe;
1523 		if (restore_tm_user_regs(regs, mcp, tm_mcp))
1524 			goto badframe;
1525 	} else
1526 #endif
1527 	{
1528 		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1529 		addr = sr;
1530 		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1531 		    || restore_user_regs(regs, sr, 1))
1532 			goto badframe;
1533 	}
1534 
1535 	set_thread_flag(TIF_RESTOREALL);
1536 	return 0;
1537 
1538 badframe:
1539 	if (show_unhandled_signals)
1540 		printk_ratelimited(KERN_INFO
1541 				   "%s[%d]: bad frame in sys_sigreturn: "
1542 				   "%p nip %08lx lr %08lx\n",
1543 				   current->comm, current->pid,
1544 				   addr, regs->nip, regs->link);
1545 
1546 	force_sig(SIGSEGV, current);
1547 	return 0;
1548 }
1549