xref: /openbmc/linux/arch/powerpc/kernel/signal_32.c (revision 2e7c04aec86758e0adfcad4a24c86593b45807a3)
1 /*
2  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3  *
4  *  PowerPC version
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  * Copyright (C) 2001 IBM
7  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9  *
10  *  Derived from "arch/i386/kernel/signal.c"
11  *    Copyright (C) 1991, 1992 Linus Torvalds
12  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
13  *
14  *  This program is free software; you can redistribute it and/or
15  *  modify it under the terms of the GNU General Public License
16  *  as published by the Free Software Foundation; either version
17  *  2 of the License, or (at your option) any later version.
18  */
19 
20 #include <linux/sched.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/elf.h>
27 #include <linux/ptrace.h>
28 #include <linux/pagemap.h>
29 #include <linux/ratelimit.h>
30 #include <linux/syscalls.h>
31 #ifdef CONFIG_PPC64
32 #include <linux/compat.h>
33 #else
34 #include <linux/wait.h>
35 #include <linux/unistd.h>
36 #include <linux/stddef.h>
37 #include <linux/tty.h>
38 #include <linux/binfmts.h>
39 #endif
40 
41 #include <linux/uaccess.h>
42 #include <asm/cacheflush.h>
43 #include <asm/syscalls.h>
44 #include <asm/sigcontext.h>
45 #include <asm/vdso.h>
46 #include <asm/switch_to.h>
47 #include <asm/tm.h>
48 #include <asm/asm-prototypes.h>
49 #ifdef CONFIG_PPC64
50 #include "ppc32.h"
51 #include <asm/unistd.h>
52 #else
53 #include <asm/ucontext.h>
54 #include <asm/pgtable.h>
55 #endif
56 
57 #include "signal.h"
58 
59 
60 #ifdef CONFIG_PPC64
61 #define old_sigaction	old_sigaction32
62 #define sigcontext	sigcontext32
63 #define mcontext	mcontext32
64 #define ucontext	ucontext32
65 
66 #define __save_altstack __compat_save_altstack
67 
68 /*
69  * Userspace code may pass a ucontext which doesn't include the VSX region
70  * added at the end.  We need to check for this case.
71  */
72 #define UCONTEXTSIZEWITHOUTVSX \
73 		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
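/*
 * Example (illustrative): a pre-VSX userspace may hand sys_swapcontext a
 * context of only UCONTEXTSIZEWITHOUTVSX bytes.  The checks there accept
 * anything at least that big, but treat the VSX region as present only
 * when ctx_size reaches the full sizeof(struct ucontext).
 */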
74 
75 /*
76  * Returning 0 means we return to userspace via
77  * ret_from_except and thus restore all user
78  * registers from *regs.  This is what we need
79  * to do when a signal has been delivered.
80  */
81 
82 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
83 #undef __SIGNAL_FRAMESIZE
84 #define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
85 #undef ELF_NVRREG
86 #define ELF_NVRREG	ELF_NVRREG32
87 
88 /*
89  * Functions for flipping sigsets (thanks to brain dead generic
90  * implementation that makes things simple for little endian only)
91  */
92 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
93 {
94 	return put_compat_sigset(uset, set, sizeof(*uset));
95 }
96 
97 static inline int get_sigset_t(sigset_t *set,
98 			       const compat_sigset_t __user *uset)
99 {
100 	return get_compat_sigset(set, uset);
101 }
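/*
 * Example (illustrative) of the compat layout these helpers produce:
 * a 64-bit sigset with SIGUSR1 (10) and signal 32 blocked,
 *
 *	sigset_t set = { .sig[0] = (1UL << 9) | (1UL << 31) };
 *
 * is seen by 32-bit userspace as two 32-bit words, signals 1-32 first:
 * uset->sig[0] = 0x80000200, uset->sig[1] = 0x00000000.
 */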
102 
103 #define to_user_ptr(p)		ptr_to_compat(p)
104 #define from_user_ptr(p)	compat_ptr(p)
105 
106 static inline int save_general_regs(struct pt_regs *regs,
107 		struct mcontext __user *frame)
108 {
109 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
110 	int i;
111 	/* Force user to always see softe as 1 (interrupts enabled) */
112 	elf_greg_t64 softe = 0x1;
113 
114 	WARN_ON(!FULL_REGS(regs));
115 
116 	for (i = 0; i <= PT_RESULT; i++) {
117 		if (i == 14 && !FULL_REGS(regs))
118 			i = 32;
119 		if (i == PT_SOFTE) {
120 			if (__put_user((unsigned int)softe, &frame->mc_gregs[i]))
121 				return -EFAULT;
122 			else
123 				continue;
124 		}
125 		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
126 			return -EFAULT;
127 	}
128 	return 0;
129 }
130 
131 static inline int restore_general_regs(struct pt_regs *regs,
132 		struct mcontext __user *sr)
133 {
134 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
135 	int i;
136 
137 	for (i = 0; i <= PT_RESULT; i++) {
138 		if ((i == PT_MSR) || (i == PT_SOFTE))
139 			continue;
140 		if (__get_user(gregs[i], &sr->mc_gregs[i]))
141 			return -EFAULT;
142 	}
143 	return 0;
144 }
145 
146 #else /* CONFIG_PPC64 */
147 
148 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
149 
150 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
151 {
152 	return copy_to_user(uset, set, sizeof(*uset));
153 }
154 
155 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
156 {
157 	return copy_from_user(set, uset, sizeof(*uset));
158 }
159 
160 #define to_user_ptr(p)		((unsigned long)(p))
161 #define from_user_ptr(p)	((void __user *)(p))
162 
163 static inline int save_general_regs(struct pt_regs *regs,
164 		struct mcontext __user *frame)
165 {
166 	WARN_ON(!FULL_REGS(regs));
167 	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
168 }
169 
170 static inline int restore_general_regs(struct pt_regs *regs,
171 		struct mcontext __user *sr)
172 {
173 	/* copy up to but not including MSR */
174 	if (__copy_from_user(regs, &sr->mc_gregs,
175 				PT_MSR * sizeof(elf_greg_t)))
176 		return -EFAULT;
177 	/* copy from orig_r3 (the word after the MSR) up to the end */
178 	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
179 				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
180 		return -EFAULT;
181 	return 0;
182 }
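/*
 * Layout assumed by the two-piece copy above (illustrative):
 *
 *	mc_gregs[0 .. PT_MSR-1]    -> regs->gpr[0] onwards
 *	mc_gregs[PT_MSR]           -> skipped (kernel-controlled MSR)
 *	mc_gregs[PT_ORIG_R3 .. ]   -> regs->orig_gpr3 onwards
 */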
183 #endif
184 
185 /*
186  * When we have signals to deliver, we set up on the
187  * user stack, going down from the original stack pointer:
188  *	an ABI gap of 56 words
189  *	an mcontext struct
190  *	a sigcontext struct
191  *	a gap of __SIGNAL_FRAMESIZE bytes
192  *
193  * Each of these things must be a multiple of 16 bytes in size. The following
194  * structure represents all of this except the __SIGNAL_FRAMESIZE gap
195  *
196  */
197 struct sigframe {
198 	struct sigcontext sctx;		/* the sigcontext */
199 	struct mcontext	mctx;		/* all the register values */
200 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
201 	struct sigcontext sctx_transact;
202 	struct mcontext	mctx_transact;
203 #endif
204 	/*
205 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
206 	 * regs and 18 fp regs below sp before decrementing it.
207 	 */
208 	int			abigap[56];
209 };
210 
211 /* We use the mc_pad field for the signal return trampoline. */
212 #define tramp	mc_pad
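/*
 * Resulting user-stack picture for a non-RT signal (illustrative;
 * the TM config adds a second sctx/mctx pair below abigap):
 *
 *	old r1  -->  caller's stack
 *	             abigap[56]       highest part of struct sigframe
 *	             mctx             register image; tramp lives in mc_pad
 *	             sctx             lowest part of struct sigframe
 *	             __SIGNAL_FRAMESIZE gap
 *	new r1  -->  back-chain word, set to old r1 by handle_signal32()
 */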
213 
214 /*
215  *  When we have rt signals to deliver, we set up on the
216  *  user stack, going down from the original stack pointer:
217  *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
218  *	a gap of __SIGNAL_FRAMESIZE+16 bytes
219  *  (the +16 is to get the siginfo and ucontext in the same
220  *  positions as in older kernels).
221  *
222  *  Each of these things must be a multiple of 16 bytes in size.
223  *
224  */
225 struct rt_sigframe {
226 #ifdef CONFIG_PPC64
227 	compat_siginfo_t info;
228 #else
229 	struct siginfo info;
230 #endif
231 	struct ucontext	uc;
232 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
233 	struct ucontext	uc_transact;
234 #endif
235 	/*
236 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
237 	 * regs and 18 fp regs below sp before decrementing it.
238 	 */
239 	int			abigap[56];
240 };
241 
242 #ifdef CONFIG_VSX
243 unsigned long copy_fpr_to_user(void __user *to,
244 			       struct task_struct *task)
245 {
246 	u64 buf[ELF_NFPREG];
247 	int i;
248 
249 	/* copy FPRs from the thread_struct to a local buffer, then to userspace */
250 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
251 		buf[i] = task->thread.TS_FPR(i);
252 	buf[i] = task->thread.fp_state.fpscr;
253 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
254 }
255 
256 unsigned long copy_fpr_from_user(struct task_struct *task,
257 				 void __user *from)
258 {
259 	u64 buf[ELF_NFPREG];
260 	int i;
261 
262 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
263 		return 1;
264 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
265 		task->thread.TS_FPR(i) = buf[i];
266 	task->thread.fp_state.fpscr = buf[i];
267 
268 	return 0;
269 }
270 
271 unsigned long copy_vsx_to_user(void __user *to,
272 			       struct task_struct *task)
273 {
274 	u64 buf[ELF_NVSRHALFREG];
275 	int i;
276 
277 	/* copy VSR low halves from the thread_struct to a local buffer, then to userspace */
278 	for (i = 0; i < ELF_NVSRHALFREG; i++)
279 		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
280 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
281 }
282 
283 unsigned long copy_vsx_from_user(struct task_struct *task,
284 				 void __user *from)
285 {
286 	u64 buf[ELF_NVSRHALFREG];
287 	int i;
288 
289 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
290 		return 1;
291 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
292 		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
293 	return 0;
294 }
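/*
 * Register layout assumed by these helpers (illustrative): each 128-bit
 * VSR 0-31 is the corresponding FPR glued to a low doubleword,
 *
 *	VSRn = [ FPRn (high 64 bits) | fp_state.fpr[n][TS_VSRLOWOFFSET] ]
 *
 * so a frame stores the high halves via copy_fpr_*() and the low halves
 * via copy_vsx_*().
 */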
295 
296 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
297 unsigned long copy_ckfpr_to_user(void __user *to,
298 				  struct task_struct *task)
299 {
300 	u64 buf[ELF_NFPREG];
301 	int i;
302 
303 	/* copy the checkpointed FPRs from the thread_struct to a local buffer, then to userspace */
304 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
305 		buf[i] = task->thread.TS_CKFPR(i);
306 	buf[i] = task->thread.ckfp_state.fpscr;
307 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
308 }
309 
310 unsigned long copy_ckfpr_from_user(struct task_struct *task,
311 					  void __user *from)
312 {
313 	u64 buf[ELF_NFPREG];
314 	int i;
315 
316 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
317 		return 1;
318 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
319 		task->thread.TS_CKFPR(i) = buf[i];
320 	task->thread.ckfp_state.fpscr = buf[i];
321 
322 	return 0;
323 }
324 
325 unsigned long copy_ckvsx_to_user(void __user *to,
326 				  struct task_struct *task)
327 {
328 	u64 buf[ELF_NVSRHALFREG];
329 	int i;
330 
331 	/* copy the checkpointed VSR low halves from the thread_struct to a local buffer, then to userspace */
332 	for (i = 0; i < ELF_NVSRHALFREG; i++)
333 		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
334 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
335 }
336 
337 unsigned long copy_ckvsx_from_user(struct task_struct *task,
338 					  void __user *from)
339 {
340 	u64 buf[ELF_NVSRHALFREG];
341 	int i;
342 
343 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
344 		return 1;
345 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
346 		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
347 	return 0;
348 }
349 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
350 #else
351 inline unsigned long copy_fpr_to_user(void __user *to,
352 				      struct task_struct *task)
353 {
354 	return __copy_to_user(to, task->thread.fp_state.fpr,
355 			      ELF_NFPREG * sizeof(double));
356 }
357 
358 inline unsigned long copy_fpr_from_user(struct task_struct *task,
359 					void __user *from)
360 {
361 	return __copy_from_user(task->thread.fp_state.fpr, from,
362 			      ELF_NFPREG * sizeof(double));
363 }
364 
365 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
366 inline unsigned long copy_ckfpr_to_user(void __user *to,
367 					 struct task_struct *task)
368 {
369 	return __copy_to_user(to, task->thread.ckfp_state.fpr,
370 			      ELF_NFPREG * sizeof(double));
371 }
372 
373 inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
374 						 void __user *from)
375 {
376 	return __copy_from_user(task->thread.ckfp_state.fpr, from,
377 				ELF_NFPREG * sizeof(double));
378 }
379 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
380 #endif
381 
382 /*
383  * Save the current user registers on the user stack.
384  * We only save the altivec/spe registers if the process has used
385  * altivec/spe instructions at some point.
386  */
387 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
388 			  struct mcontext __user *tm_frame, int sigret,
389 			  int ctx_has_vsx_region)
390 {
391 	unsigned long msr = regs->msr;
392 
393 	/* Make sure floating point registers are stored in regs */
394 	flush_fp_to_thread(current);
395 
396 	/* save general registers */
397 	if (save_general_regs(regs, frame))
398 		return 1;
399 
400 #ifdef CONFIG_ALTIVEC
401 	/* save altivec registers */
402 	if (current->thread.used_vr) {
403 		flush_altivec_to_thread(current);
404 		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
405 				   ELF_NVRREG * sizeof(vector128)))
406 			return 1;
407 		/* set MSR_VEC in the saved MSR value to indicate that
408 		   frame->mc_vregs contains valid data */
409 		msr |= MSR_VEC;
410 	}
411 	/* else assert((regs->msr & MSR_VEC) == 0) */
412 
413 	/* We always copy to/from vrsave, it's 0 if we don't have or don't
414 	 * use altivec. Since VSCR only contains 32 bits saved in the least
415 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
416 	 * most significant bits of that same vector. --BenH
417 	 * Note that the current VRSAVE value is in the SPR at this point.
418 	 */
419 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
420 		current->thread.vrsave = mfspr(SPRN_VRSAVE);
421 	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
422 		return 1;
423 #endif /* CONFIG_ALTIVEC */
424 	if (copy_fpr_to_user(&frame->mc_fregs, current))
425 		return 1;
426 
427 	/*
428 	 * Clear the MSR VSX bit to indicate there is no valid state attached
429 	 * to this context, except in the specific case below where we set it.
430 	 */
431 	msr &= ~MSR_VSX;
432 #ifdef CONFIG_VSX
433 	/*
434 	 * Copy VSR 0-31 upper half from thread_struct to local
435 	 * buffer, then write that to userspace.  Also set MSR_VSX in
436 	 * the saved MSR value to indicate that frame->mc_vregs
437 	 * contains valid data
438 	 */
439 	if (current->thread.used_vsr && ctx_has_vsx_region) {
440 		flush_vsx_to_thread(current);
441 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
442 			return 1;
443 		msr |= MSR_VSX;
444 	}
445 #endif /* CONFIG_VSX */
446 #ifdef CONFIG_SPE
447 	/* save spe registers */
448 	if (current->thread.used_spe) {
449 		flush_spe_to_thread(current);
450 		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
451 				   ELF_NEVRREG * sizeof(u32)))
452 			return 1;
453 		/* set MSR_SPE in the saved MSR value to indicate that
454 		   frame->mc_vregs contains valid data */
455 		msr |= MSR_SPE;
456 	}
457 	/* else assert((regs->msr & MSR_SPE) == 0) */
458 
459 	/* We always copy to/from spefscr */
460 	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
461 		return 1;
462 #endif /* CONFIG_SPE */
463 
464 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
465 		return 1;
466 	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
467 	 * can check it on the restore to see if TM is active
468 	 */
469 	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
470 		return 1;
471 
472 	if (sigret) {
473 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
474 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
475 		    || __put_user(0x44000002UL, &frame->tramp[1]))
476 			return 1;
477 		flush_icache_range((unsigned long) &frame->tramp[0],
478 				   (unsigned long) &frame->tramp[2]);
479 	}
480 
481 	return 0;
482 }
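/*
 * Decoding of the trampoline written above (illustrative):
 *
 *	0x38000000 | sigret   =>  li r0,sigret   (addi r0,0,sigret)
 *	0x44000002            =>  sc
 *
 * so the handler's return lands in sys_sigreturn/sys_rt_sigreturn with
 * the syscall number already in r0.
 */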
483 
484 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
485 /*
486  * Save the current user registers on the user stack.
487  * We only save the altivec/spe registers if the process has used
488  * altivec/spe instructions at some point.
489  * We also save the transactional registers to a second ucontext in the
490  * frame.
491  *
492  * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
493  */
494 static int save_tm_user_regs(struct pt_regs *regs,
495 			     struct mcontext __user *frame,
496 			     struct mcontext __user *tm_frame, int sigret)
497 {
498 	unsigned long msr = regs->msr;
499 
500 	WARN_ON(tm_suspend_disabled);
501 
502 	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
503 	 * just indicates to userland that we were doing a transaction, but we
504 	 * don't want to return in transactional state.  This also ensures
505 	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
506 	 */
507 	regs->msr &= ~MSR_TS_MASK;
508 
509 	/* Save both sets of general registers */
510 	if (save_general_regs(&current->thread.ckpt_regs, frame)
511 	    || save_general_regs(regs, tm_frame))
512 		return 1;
513 
514 	/* Stash the top half of the 64bit MSR into the 32bit MSR word
515 	 * of the transactional mcontext.  This way we have a backward-compatible
516 	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
517 	 * also look at what type of transaction (T or S) was active at the
518 	 * time of the signal.
519 	 */
520 	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
521 		return 1;
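	/*
	 * Illustrative example of the stash above: with TS = 0b10
	 * (transactional), MSR bits 33-34, msr >> 32 leaves the TS bits
	 * at bits 1-2 of the stored 32-bit word; sigreturn shifts the
	 * word back up by 32 before testing MSR_TM_ACTIVE() on it.
	 */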
522 
523 #ifdef CONFIG_ALTIVEC
524 	/* save altivec registers */
525 	if (current->thread.used_vr) {
526 		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
527 				   ELF_NVRREG * sizeof(vector128)))
528 			return 1;
529 		if (msr & MSR_VEC) {
530 			if (__copy_to_user(&tm_frame->mc_vregs,
531 					   &current->thread.vr_state,
532 					   ELF_NVRREG * sizeof(vector128)))
533 				return 1;
534 		} else {
535 			if (__copy_to_user(&tm_frame->mc_vregs,
536 					   &current->thread.ckvr_state,
537 					   ELF_NVRREG * sizeof(vector128)))
538 				return 1;
539 		}
540 
541 		/* set MSR_VEC in the saved MSR value to indicate that
542 		 * frame->mc_vregs contains valid data
543 		 */
544 		msr |= MSR_VEC;
545 	}
546 
547 	/* We always copy to/from vrsave, it's 0 if we don't have or don't
548 	 * use altivec. Since VSCR only contains 32 bits saved in the least
549 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
550 	 * most significant bits of that same vector. --BenH
551 	 */
552 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
553 		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
554 	if (__put_user(current->thread.ckvrsave,
555 		       (u32 __user *)&frame->mc_vregs[32]))
556 		return 1;
557 	if (msr & MSR_VEC) {
558 		if (__put_user(current->thread.vrsave,
559 			       (u32 __user *)&tm_frame->mc_vregs[32]))
560 			return 1;
561 	} else {
562 		if (__put_user(current->thread.ckvrsave,
563 			       (u32 __user *)&tm_frame->mc_vregs[32]))
564 			return 1;
565 	}
566 #endif /* CONFIG_ALTIVEC */
567 
568 	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
569 		return 1;
570 	if (msr & MSR_FP) {
571 		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
572 			return 1;
573 	} else {
574 		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
575 			return 1;
576 	}
577 
578 #ifdef CONFIG_VSX
579 	/*
580 	 * Copy VSR 0-31 upper half from thread_struct to local
581 	 * buffer, then write that to userspace.  Also set MSR_VSX in
582 	 * the saved MSR value to indicate that frame->mc_vregs
583 	 * contains valid data
584 	 */
585 	if (current->thread.used_vsr) {
586 		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
587 			return 1;
588 		if (msr & MSR_VSX) {
589 			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
590 						      current))
591 				return 1;
592 		} else {
593 			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
594 				return 1;
595 		}
596 
597 		msr |= MSR_VSX;
598 	}
599 #endif /* CONFIG_VSX */
600 #ifdef CONFIG_SPE
601 	/* SPE regs are not checkpointed with TM, so this section is
602 	 * simply the same as in save_user_regs().
603 	 */
604 	if (current->thread.used_spe) {
605 		flush_spe_to_thread(current);
606 		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
607 				   ELF_NEVRREG * sizeof(u32)))
608 			return 1;
609 		/* set MSR_SPE in the saved MSR value to indicate that
610 		 * frame->mc_vregs contains valid data */
611 		msr |= MSR_SPE;
612 	}
613 
614 	/* We always copy to/from spefscr */
615 	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
616 		return 1;
617 #endif /* CONFIG_SPE */
618 
619 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
620 		return 1;
621 	if (sigret) {
622 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
623 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
624 		    || __put_user(0x44000002UL, &frame->tramp[1]))
625 			return 1;
626 		flush_icache_range((unsigned long) &frame->tramp[0],
627 				   (unsigned long) &frame->tramp[2]);
628 	}
629 
630 	return 0;
631 }
632 #endif
633 
634 /*
635  * Restore the current user register values from the user stack,
636  * (except for MSR).
637  */
638 static long restore_user_regs(struct pt_regs *regs,
639 			      struct mcontext __user *sr, int sig)
640 {
641 	long err;
642 	unsigned int save_r2 = 0;
643 	unsigned long msr;
644 #ifdef CONFIG_VSX
645 	int i;
646 #endif
647 
648 	/*
649 	 * restore general registers, not including MSR or SOFTE. Also
650 	 * take care of keeping r2 (TLS) intact if not a signal
651 	 */
652 	if (!sig)
653 		save_r2 = (unsigned int)regs->gpr[2];
654 	err = restore_general_regs(regs, sr);
655 	regs->trap = 0;
656 	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
657 	if (!sig)
658 		regs->gpr[2] = (unsigned long) save_r2;
659 	if (err)
660 		return 1;
661 
662 	/* if doing signal return, restore the previous little-endian mode */
663 	if (sig)
664 		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
665 
666 #ifdef CONFIG_ALTIVEC
667 	/*
668 	 * Force the process to reload the altivec registers from
669 	 * current->thread when it next does altivec instructions
670 	 */
671 	regs->msr &= ~MSR_VEC;
672 	if (msr & MSR_VEC) {
673 		/* restore altivec registers from the stack */
674 		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
675 				     sizeof(sr->mc_vregs)))
676 			return 1;
677 		current->thread.used_vr = true;
678 	} else if (current->thread.used_vr)
679 		memset(&current->thread.vr_state, 0,
680 		       ELF_NVRREG * sizeof(vector128));
681 
682 	/* Always get VRSAVE back */
683 	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
684 		return 1;
685 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
686 		mtspr(SPRN_VRSAVE, current->thread.vrsave);
687 #endif /* CONFIG_ALTIVEC */
688 	if (copy_fpr_from_user(current, &sr->mc_fregs))
689 		return 1;
690 
691 #ifdef CONFIG_VSX
692 	/*
693 	 * Force the process to reload the VSX registers from
694 	 * current->thread when it next does VSX instruction.
695 	 */
696 	regs->msr &= ~MSR_VSX;
697 	if (msr & MSR_VSX) {
698 		/*
699 		 * Restore altivec registers from the stack to a local
700 		 * buffer, then write this out to the thread_struct
701 		 */
702 		if (copy_vsx_from_user(current, &sr->mc_vsregs))
703 			return 1;
704 		current->thread.used_vsr = true;
705 	} else if (current->thread.used_vsr)
706 		for (i = 0; i < 32 ; i++)
707 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
708 #endif /* CONFIG_VSX */
709 	/*
710 	 * force the process to reload the FP registers from
711 	 * current->thread when it next does FP instructions
712 	 */
713 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
714 
715 #ifdef CONFIG_SPE
716 	/* force the process to reload the spe registers from
717 	   current->thread when it next does spe instructions */
718 	regs->msr &= ~MSR_SPE;
719 	if (msr & MSR_SPE) {
720 		/* restore spe registers from the stack */
721 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
722 				     ELF_NEVRREG * sizeof(u32)))
723 			return 1;
724 		current->thread.used_spe = true;
725 	} else if (current->thread.used_spe)
726 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
727 
728 	/* Always get SPEFSCR back */
729 	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
730 		return 1;
731 #endif /* CONFIG_SPE */
732 
733 	return 0;
734 }
735 
736 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
737 /*
738  * Restore the current user register values from the user stack, except for
739  * MSR, and recheckpoint the original checkpointed register state for processes
740  * in transactions.
741  */
742 static long restore_tm_user_regs(struct pt_regs *regs,
743 				 struct mcontext __user *sr,
744 				 struct mcontext __user *tm_sr)
745 {
746 	long err;
747 	unsigned long msr, msr_hi;
748 #ifdef CONFIG_VSX
749 	int i;
750 #endif
751 
752 	if (tm_suspend_disabled)
753 		return 1;
754 	/*
755 	 * restore general registers, not including MSR or SOFTE. Also
756 	 * take care of keeping r2 (TLS) intact if not a signal.
757 	 * See comment in signal_64.c:restore_tm_sigcontexts();
758 	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
759 	 * were set by the signal delivery.
760 	 */
761 	err = restore_general_regs(regs, tm_sr);
762 	err |= restore_general_regs(&current->thread.ckpt_regs, sr);
763 
764 	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
765 
766 	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
767 	if (err)
768 		return 1;
769 
770 	/* Restore the previous little-endian mode */
771 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
772 
773 #ifdef CONFIG_ALTIVEC
774 	regs->msr &= ~MSR_VEC;
775 	if (msr & MSR_VEC) {
776 		/* restore altivec registers from the stack */
777 		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
778 				     sizeof(sr->mc_vregs)) ||
779 		    __copy_from_user(&current->thread.vr_state,
780 				     &tm_sr->mc_vregs,
781 				     sizeof(sr->mc_vregs)))
782 			return 1;
783 		current->thread.used_vr = true;
784 	} else if (current->thread.used_vr) {
785 		memset(&current->thread.vr_state, 0,
786 		       ELF_NVRREG * sizeof(vector128));
787 		memset(&current->thread.ckvr_state, 0,
788 		       ELF_NVRREG * sizeof(vector128));
789 	}
790 
791 	/* Always get VRSAVE back */
792 	if (__get_user(current->thread.ckvrsave,
793 		       (u32 __user *)&sr->mc_vregs[32]) ||
794 	    __get_user(current->thread.vrsave,
795 		       (u32 __user *)&tm_sr->mc_vregs[32]))
796 		return 1;
797 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
798 		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
799 #endif /* CONFIG_ALTIVEC */
800 
801 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
802 
803 	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
804 	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
805 		return 1;
806 
807 #ifdef CONFIG_VSX
808 	regs->msr &= ~MSR_VSX;
809 	if (msr & MSR_VSX) {
810 		/*
811 		 * Restore altivec registers from the stack to a local
812 		 * buffer, then write this out to the thread_struct
813 		 */
814 		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
815 		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
816 			return 1;
817 		current->thread.used_vsr = true;
818 	} else if (current->thread.used_vsr)
819 		for (i = 0; i < 32 ; i++) {
820 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
821 			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
822 		}
823 #endif /* CONFIG_VSX */
824 
825 #ifdef CONFIG_SPE
826 	/* SPE regs are not checkpointed with TM, so this section is
827 	 * simply the same as in restore_user_regs().
828 	 */
829 	regs->msr &= ~MSR_SPE;
830 	if (msr & MSR_SPE) {
831 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
832 				     ELF_NEVRREG * sizeof(u32)))
833 			return 1;
834 		current->thread.used_spe = true;
835 	} else if (current->thread.used_spe)
836 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
837 
838 	/* Always get SPEFSCR back */
839 	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
840 		       + ELF_NEVRREG))
841 		return 1;
842 #endif /* CONFIG_SPE */
843 
844 	/* Get the top half of the MSR from the user context */
845 	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
846 		return 1;
847 	msr_hi <<= 32;
848 	/* If TM bits are set to the reserved value, it's an invalid context */
849 	if (MSR_TM_RESV(msr_hi))
850 		return 1;
851 	/* Pull in the MSR TM bits from the user context */
852 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
853 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
854 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
855 	 * transactional versions should be loaded.
856 	 */
857 	tm_enable();
858 	/* Make sure the transaction is marked as failed */
859 	current->thread.tm_texasr |= TEXASR_FS;
860 	/* This loads the checkpointed FP/VEC state, if used */
861 	tm_recheckpoint(&current->thread);
862 
863 	/* This loads the speculative FP/VEC state, if used */
864 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
865 	if (msr & MSR_FP) {
866 		load_fp_state(&current->thread.fp_state);
867 		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
868 	}
869 #ifdef CONFIG_ALTIVEC
870 	if (msr & MSR_VEC) {
871 		load_vr_state(&current->thread.vr_state);
872 		regs->msr |= MSR_VEC;
873 	}
874 #endif
875 
876 	return 0;
877 }
878 #endif
879 
880 #ifdef CONFIG_PPC64
881 
882 #define copy_siginfo_to_user	copy_siginfo_to_user32
883 
884 #endif /* CONFIG_PPC64 */
885 
886 /*
887  * Set up a signal frame for a "real-time" signal handler
888  * (one which gets siginfo).
889  */
890 int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
891 		       struct task_struct *tsk)
892 {
893 	struct rt_sigframe __user *rt_sf;
894 	struct mcontext __user *frame;
895 	struct mcontext __user *tm_frame = NULL;
896 	void __user *addr;
897 	unsigned long newsp = 0;
898 	int sigret;
899 	unsigned long tramp;
900 	struct pt_regs *regs = tsk->thread.regs;
901 
902 	BUG_ON(tsk != current);
903 
904 	/* Set up Signal Frame */
905 	/* Put a Real Time Context onto stack */
906 	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
907 	addr = rt_sf;
908 	if (unlikely(rt_sf == NULL))
909 		goto badframe;
910 
911 	/* Put the siginfo & fill in most of the ucontext */
912 	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
913 	    || __put_user(0, &rt_sf->uc.uc_flags)
914 	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
915 	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
916 		    &rt_sf->uc.uc_regs)
917 	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
918 		goto badframe;
919 
920 	/* Save user registers on the stack */
921 	frame = &rt_sf->uc.uc_mcontext;
922 	addr = frame;
923 	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
924 		sigret = 0;
925 		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
926 	} else {
927 		sigret = __NR_rt_sigreturn;
928 		tramp = (unsigned long) frame->tramp;
929 	}
930 
931 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
932 	tm_frame = &rt_sf->uc_transact.uc_mcontext;
933 	if (MSR_TM_ACTIVE(regs->msr)) {
934 		if (__put_user((unsigned long)&rt_sf->uc_transact,
935 			       &rt_sf->uc.uc_link) ||
936 		    __put_user((unsigned long)tm_frame,
937 			       &rt_sf->uc_transact.uc_regs))
938 			goto badframe;
939 		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
940 			goto badframe;
941 	}
942 	else
943 #endif
944 	{
945 		if (__put_user(0, &rt_sf->uc.uc_link))
946 			goto badframe;
947 		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
948 			goto badframe;
949 	}
950 	regs->link = tramp;
951 
952 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
953 
954 	/* create a stack frame for the caller of the handler */
955 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
956 	addr = (void __user *)regs->gpr[1];
957 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
958 		goto badframe;
959 
960 	/* Fill registers for signal handler */
961 	regs->gpr[1] = newsp;
962 	regs->gpr[3] = ksig->sig;
963 	regs->gpr[4] = (unsigned long) &rt_sf->info;
964 	regs->gpr[5] = (unsigned long) &rt_sf->uc;
965 	regs->gpr[6] = (unsigned long) rt_sf;
966 	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
967 	/* enter the signal handler in native-endian mode */
968 	regs->msr &= ~MSR_LE;
969 	regs->msr |= (MSR_KERNEL & MSR_LE);
970 	return 0;
971 
972 badframe:
973 	if (show_unhandled_signals)
974 		printk_ratelimited(KERN_INFO
975 				   "%s[%d]: bad frame in handle_rt_signal32: "
976 				   "%p nip %08lx lr %08lx\n",
977 				   tsk->comm, tsk->pid,
978 				   addr, regs->nip, regs->link);
979 
980 	return 1;
981 }
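/*
 * Note the inverse mapping used at sigreturn time (illustrative): since
 * newsp = rt_sf - (__SIGNAL_FRAMESIZE + 16) above, sys_rt_sigreturn
 * recovers the frame as regs->gpr[1] + __SIGNAL_FRAMESIZE + 16.
 */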
982 
983 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
984 {
985 	sigset_t set;
986 	struct mcontext __user *mcp;
987 
988 	if (get_sigset_t(&set, &ucp->uc_sigmask))
989 		return -EFAULT;
990 #ifdef CONFIG_PPC64
991 	{
992 		u32 cmcp;
993 
994 		if (__get_user(cmcp, &ucp->uc_regs))
995 			return -EFAULT;
996 		mcp = (struct mcontext __user *)(u64)cmcp;
997 		/* no need to check access_ok(mcp), since mcp < 4GB */
998 	}
999 #else
1000 	if (__get_user(mcp, &ucp->uc_regs))
1001 		return -EFAULT;
1002 	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
1003 		return -EFAULT;
1004 #endif
1005 	set_current_blocked(&set);
1006 	if (restore_user_regs(regs, mcp, sig))
1007 		return -EFAULT;
1008 
1009 	return 0;
1010 }
1011 
1012 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1013 static int do_setcontext_tm(struct ucontext __user *ucp,
1014 			    struct ucontext __user *tm_ucp,
1015 			    struct pt_regs *regs)
1016 {
1017 	sigset_t set;
1018 	struct mcontext __user *mcp;
1019 	struct mcontext __user *tm_mcp;
1020 	u32 cmcp;
1021 	u32 tm_cmcp;
1022 
1023 	if (get_sigset_t(&set, &ucp->uc_sigmask))
1024 		return -EFAULT;
1025 
1026 	if (__get_user(cmcp, &ucp->uc_regs) ||
1027 	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
1028 		return -EFAULT;
1029 	mcp = (struct mcontext __user *)(u64)cmcp;
1030 	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1031 	/* no need to check access_ok(mcp), since mcp < 4GB */
1032 
1033 	set_current_blocked(&set);
1034 	if (restore_tm_user_regs(regs, mcp, tm_mcp))
1035 		return -EFAULT;
1036 
1037 	return 0;
1038 }
1039 #endif
1040 
1041 #ifdef CONFIG_PPC64
1042 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1043 		       struct ucontext __user *, new_ctx, int, ctx_size)
1044 #else
1045 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1046 		       struct ucontext __user *, new_ctx, long, ctx_size)
1047 #endif
1048 {
1049 	struct pt_regs *regs = current_pt_regs();
1050 	int ctx_has_vsx_region = 0;
1051 
1052 #ifdef CONFIG_PPC64
1053 	unsigned long new_msr = 0;
1054 
1055 	if (new_ctx) {
1056 		struct mcontext __user *mcp;
1057 		u32 cmcp;
1058 
1059 		/*
1060 		 * Get pointer to the real mcontext.  No need for
1061 		 * access_ok since we are dealing with compat
1062 		 * pointers.
1063 		 */
1064 		if (__get_user(cmcp, &new_ctx->uc_regs))
1065 			return -EFAULT;
1066 		mcp = (struct mcontext __user *)(u64)cmcp;
1067 		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1068 			return -EFAULT;
1069 	}
1070 	/*
1071 	 * Check that the context is not smaller than the original
1072 	 * size (with VMX but without VSX)
1073 	 */
1074 	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1075 		return -EINVAL;
1076 	/*
1077 	 * Reject the new context if it sets the MSR VSX bit but
1078 	 * doesn't provide room for VSX state.
1079 	 */
1080 	if ((ctx_size < sizeof(struct ucontext)) &&
1081 	    (new_msr & MSR_VSX))
1082 		return -EINVAL;
1083 	/* Does the context have enough room to store VSX data? */
1084 	if (ctx_size >= sizeof(struct ucontext))
1085 		ctx_has_vsx_region = 1;
1086 #else
1087 	/* Context size is for future use. Right now, we only make sure
1088 	 * we are passed something we understand.
1089 	 */
1090 	if (ctx_size < sizeof(struct ucontext))
1091 		return -EINVAL;
1092 #endif
1093 	if (old_ctx != NULL) {
1094 		struct mcontext __user *mctx;
1095 
1096 		/*
1097 		 * old_ctx might not be 16-byte aligned, in which
1098 		 * case old_ctx->uc_mcontext won't be either.
1099 		 * Because we have the old_ctx->uc_pad2 field
1100 		 * before old_ctx->uc_mcontext, we need to round down
1101 		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1102 		 */
1103 		mctx = (struct mcontext __user *)
1104 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1105 		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1106 		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1107 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1108 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1109 			return -EFAULT;
1110 	}
1111 	if (new_ctx == NULL)
1112 		return 0;
1113 	if (!access_ok(VERIFY_READ, new_ctx, ctx_size) ||
1114 	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1115 		return -EFAULT;
1116 
1117 	/*
1118 	 * If we get a fault copying the context into the kernel's
1119 	 * image of the user's registers, we can't just return -EFAULT
1120 	 * because the user's registers will be corrupted.  For instance
1121 	 * the NIP value may have been updated but not some of the
1122 	 * other registers.  Given that we have done the access_ok
1123 	 * and successfully read the first and last bytes of the region
1124 	 * above, this should only happen in an out-of-memory situation
1125 	 * or if another thread unmaps the region containing the context.
1126 	 * We kill the task with a SIGSEGV in this situation.
1127 	 */
1128 	if (do_setcontext(new_ctx, regs, 0))
1129 		do_exit(SIGSEGV);
1130 
1131 	set_thread_flag(TIF_RESTOREALL);
1132 	return 0;
1133 }
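/*
 * Userspace view (illustrative sketch, not part of this file): the raw
 * syscall behaves much like the classic swapcontext() routine,
 *
 *	struct ucontext old, new;
 *	...initialize new...
 *	syscall(__NR_swapcontext, &old, &new, sizeof(struct ucontext));
 *
 * saving the current registers into old and then reloading from new.
 */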
1134 
1135 #ifdef CONFIG_PPC64
1136 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1137 #else
1138 SYSCALL_DEFINE0(rt_sigreturn)
1139 #endif
1140 {
1141 	struct rt_sigframe __user *rt_sf;
1142 	struct pt_regs *regs = current_pt_regs();
1143 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1144 	struct ucontext __user *uc_transact;
1145 	unsigned long msr_hi;
1146 	unsigned long tmp;
1147 	int tm_restore = 0;
1148 #endif
1149 	/* Always make any pending restarted system calls return -EINTR */
1150 	current->restart_block.fn = do_no_restart_syscall;
1151 
1152 	rt_sf = (struct rt_sigframe __user *)
1153 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1154 	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1155 		goto bad;
1156 
1157 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1158 	/*
1159 	 * If there is a transactional state then throw it away.
1160 	 * The purpose of a sigreturn is to destroy all traces of the
1161 	 * signal frame; this includes any transactional state created
1162 	 * within it. We only check for suspended, as we can never be
1163 	 * active in the kernel; if we somehow are, there is nothing better
1164 	 * to do than go ahead and hit the Bad Thing later.
1165 	 * The cause is not important as there will never be a
1166 	 * recheckpoint so it's not user visible.
1167 	 */
1168 	if (MSR_TM_SUSPENDED(mfmsr()))
1169 		tm_reclaim_current(0);
1170 
1171 	if (__get_user(tmp, &rt_sf->uc.uc_link))
1172 		goto bad;
1173 	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1174 	if (uc_transact) {
1175 		u32 cmcp;
1176 		struct mcontext __user *mcp;
1177 
1178 		if (__get_user(cmcp, &uc_transact->uc_regs))
1179 			return -EFAULT;
1180 		mcp = (struct mcontext __user *)(u64)cmcp;
1181 		/* The top 32 bits of the MSR are stashed in the transactional
1182 		 * ucontext. */
1183 		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1184 			goto bad;
1185 
1186 		if (MSR_TM_ACTIVE(msr_hi<<32)) {
1187 			/* We only recheckpoint on return if we're in a
1188 			 * transaction.
1189 			 */
1190 			tm_restore = 1;
1191 			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1192 				goto bad;
1193 		}
1194 	}
1195 	if (!tm_restore)
1196 		/* Fall through, for non-TM restore */
1197 #endif
1198 	if (do_setcontext(&rt_sf->uc, regs, 1))
1199 		goto bad;
1200 
1201 	/*
1202 	 * It's not clear whether or why it is desirable to save the
1203 	 * sigaltstack setting on signal delivery and restore it on
1204 	 * signal return.  But other architectures do this and we have
1205 	 * always done it up until now so it is probably better not to
1206 	 * change it.  -- paulus
1207 	 */
1208 #ifdef CONFIG_PPC64
1209 	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1210 		goto bad;
1211 #else
1212 	if (restore_altstack(&rt_sf->uc.uc_stack))
1213 		goto bad;
1214 #endif
1215 	set_thread_flag(TIF_RESTOREALL);
1216 	return 0;
1217 
1218  bad:
1219 	if (show_unhandled_signals)
1220 		printk_ratelimited(KERN_INFO
1221 				   "%s[%d]: bad frame in sys_rt_sigreturn: "
1222 				   "%p nip %08lx lr %08lx\n",
1223 				   current->comm, current->pid,
1224 				   rt_sf, regs->nip, regs->link);
1225 
1226 	force_sig(SIGSEGV, current);
1227 	return 0;
1228 }
1229 
1230 #ifdef CONFIG_PPC32
1231 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
1232 			 int, ndbg, struct sig_dbg_op __user *, dbg)
1233 {
1234 	struct pt_regs *regs = current_pt_regs();
1235 	struct sig_dbg_op op;
1236 	int i;
1237 	unsigned long new_msr = regs->msr;
1238 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1239 	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1240 #endif
1241 
1242 	for (i = 0; i < ndbg; i++) {
1243 		if (copy_from_user(&op, dbg + i, sizeof(op)))
1244 			return -EFAULT;
1245 		switch (op.dbg_type) {
1246 		case SIG_DBG_SINGLE_STEPPING:
1247 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1248 			if (op.dbg_value) {
1249 				new_msr |= MSR_DE;
1250 				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1251 			} else {
1252 				new_dbcr0 &= ~DBCR0_IC;
1253 				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1254 						current->thread.debug.dbcr1)) {
1255 					new_msr &= ~MSR_DE;
1256 					new_dbcr0 &= ~DBCR0_IDM;
1257 				}
1258 			}
1259 #else
1260 			if (op.dbg_value)
1261 				new_msr |= MSR_SE;
1262 			else
1263 				new_msr &= ~MSR_SE;
1264 #endif
1265 			break;
1266 		case SIG_DBG_BRANCH_TRACING:
1267 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1268 			return -EINVAL;
1269 #else
1270 			if (op.dbg_value)
1271 				new_msr |= MSR_BE;
1272 			else
1273 				new_msr &= ~MSR_BE;
1274 #endif
1275 			break;
1276 
1277 		default:
1278 			return -EINVAL;
1279 		}
1280 	}
1281 
1282 	/* We wait until here to actually install the values in the
1283 	   registers so if we fail in the above loop, it will not
1284 	   affect the contents of these registers.  After this point,
1285 	   failure is a problem, anyway, and it's very unlikely unless
1286 	   the user is really doing something wrong. */
1287 	regs->msr = new_msr;
1288 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1289 	current->thread.debug.dbcr0 = new_dbcr0;
1290 #endif
1291 
1292 	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) ||
1293 	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
1294 		return -EFAULT;
1295 
1296 	/*
1297 	 * If we get a fault copying the context into the kernel's
1298 	 * image of the user's registers, we can't just return -EFAULT
1299 	 * because the user's registers will be corrupted.  For instance
1300 	 * the NIP value may have been updated but not some of the
1301 	 * other registers.  Given that we have done the access_ok
1302 	 * and successfully read the first and last bytes of the region
1303 	 * above, this should only happen in an out-of-memory situation
1304 	 * or if another thread unmaps the region containing the context.
1305 	 * We kill the task with a SIGSEGV in this situation.
1306 	 */
1307 	if (do_setcontext(ctx, regs, 1)) {
1308 		if (show_unhandled_signals)
1309 			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1310 					   "sys_debug_setcontext: %p nip %08lx "
1311 					   "lr %08lx\n",
1312 					   current->comm, current->pid,
1313 					   ctx, regs->nip, regs->link);
1314 
1315 		force_sig(SIGSEGV, current);
1316 		goto out;
1317 	}
1318 
1319 	/*
1320 	 * It's not clear whether or why it is desirable to save the
1321 	 * sigaltstack setting on signal delivery and restore it on
1322 	 * signal return.  But other architectures do this and we have
1323 	 * always done it up until now so it is probably better not to
1324 	 * change it.  -- paulus
1325 	 */
1326 	restore_altstack(&ctx->uc_stack);
1327 
1328 	set_thread_flag(TIF_RESTOREALL);
1329  out:
1330 	return 0;
1331 }
1332 #endif
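/*
 * Illustrative (assumed) userspace usage of sys_debug_setcontext: turn
 * on single-stepping while switching to a saved context,
 *
 *	struct sig_dbg_op op = {
 *		.dbg_type  = SIG_DBG_SINGLE_STEPPING,
 *		.dbg_value = 1,
 *	};
 *	syscall(__NR_debug_setcontext, ucp, 1, &op);
 *
 * where ucp points at a ucontext previously filled in by the kernel.
 */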
1333 
1334 /*
1335  * OK, we're invoking a handler
1336  */
1337 int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1338 		struct task_struct *tsk)
1339 {
1340 	struct sigcontext __user *sc;
1341 	struct sigframe __user *frame;
1342 	struct mcontext __user *tm_mctx = NULL;
1343 	unsigned long newsp = 0;
1344 	int sigret;
1345 	unsigned long tramp;
1346 	struct pt_regs *regs = tsk->thread.regs;
1347 
1348 	BUG_ON(tsk != current);
1349 
1350 	/* Set up Signal Frame */
1351 	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1352 	if (unlikely(frame == NULL))
1353 		goto badframe;
1354 	sc = (struct sigcontext __user *) &frame->sctx;
1355 
1356 #if _NSIG != 64
1357 #error "Please adjust handle_signal()"
1358 #endif
1359 	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1360 	    || __put_user(oldset->sig[0], &sc->oldmask)
1361 #ifdef CONFIG_PPC64
1362 	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1363 #else
1364 	    || __put_user(oldset->sig[1], &sc->_unused[3])
1365 #endif
1366 	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1367 	    || __put_user(ksig->sig, &sc->signal))
1368 		goto badframe;
1369 
1370 	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1371 		sigret = 0;
1372 		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1373 	} else {
1374 		sigret = __NR_sigreturn;
1375 		tramp = (unsigned long) frame->mctx.tramp;
1376 	}
1377 
1378 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1379 	tm_mctx = &frame->mctx_transact;
1380 	if (MSR_TM_ACTIVE(regs->msr)) {
1381 		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1382 				      sigret))
1383 			goto badframe;
1384 	}
1385 	else
1386 #endif
1387 	{
1388 		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1389 			goto badframe;
1390 	}
1391 
1392 	regs->link = tramp;
1393 
1394 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
1395 
1396 	/* create a stack frame for the caller of the handler */
1397 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1398 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
1399 		goto badframe;
1400 
1401 	regs->gpr[1] = newsp;
1402 	regs->gpr[3] = ksig->sig;
1403 	regs->gpr[4] = (unsigned long) sc;
1404 	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
1405 	/* enter the signal handler in big-endian mode */
1406 	regs->msr &= ~MSR_LE;
1407 	return 0;
1408 
1409 badframe:
1410 	if (show_unhandled_signals)
1411 		printk_ratelimited(KERN_INFO
1412 				   "%s[%d]: bad frame in handle_signal32: "
1413 				   "%p nip %08lx lr %08lx\n",
1414 				   tsk->comm, tsk->pid,
1415 				   frame, regs->nip, regs->link);
1416 
1417 	return 1;
1418 }
1419 
1420 /*
1421  * Do a signal return; undo the signal stack.
1422  */
1423 #ifdef CONFIG_PPC64
1424 COMPAT_SYSCALL_DEFINE0(sigreturn)
1425 #else
1426 SYSCALL_DEFINE0(sigreturn)
1427 #endif
1428 {
1429 	struct pt_regs *regs = current_pt_regs();
1430 	struct sigframe __user *sf;
1431 	struct sigcontext __user *sc;
1432 	struct sigcontext sigctx;
1433 	struct mcontext __user *sr;
1434 	void __user *addr;
1435 	sigset_t set;
1436 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1437 	struct mcontext __user *mcp, *tm_mcp;
1438 	unsigned long msr_hi;
1439 #endif
1440 
1441 	/* Always make any pending restarted system calls return -EINTR */
1442 	current->restart_block.fn = do_no_restart_syscall;
1443 
1444 	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1445 	sc = &sf->sctx;
1446 	addr = sc;
1447 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1448 		goto badframe;
1449 
1450 #ifdef CONFIG_PPC64
1451 	/*
1452 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1453 	 * unused part of the signal stackframe
1454 	 */
1455 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1456 #else
1457 	set.sig[0] = sigctx.oldmask;
1458 	set.sig[1] = sigctx._unused[3];
1459 #endif
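	/*
	 * Example of the reconstruction above (illustrative): a task that
	 * blocked SIGUSR1 (10) and signal 35 left
	 * oldmask = 0x00000200 and _unused[3] = 0x00000004, giving
	 * set.sig[0] = 0x0000000400000200 on a 64-bit kernel (the PPC32
	 * case keeps the two words in sig[0] and sig[1]).
	 */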
1460 	set_current_blocked(&set);
1461 
1462 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1463 	mcp = (struct mcontext __user *)&sf->mctx;
1464 	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1465 	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1466 		goto badframe;
1467 	if (MSR_TM_ACTIVE(msr_hi<<32)) {
1468 		if (!cpu_has_feature(CPU_FTR_TM))
1469 			goto badframe;
1470 		if (restore_tm_user_regs(regs, mcp, tm_mcp))
1471 			goto badframe;
1472 	} else
1473 #endif
1474 	{
1475 		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1476 		addr = sr;
1477 		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1478 		    || restore_user_regs(regs, sr, 1))
1479 			goto badframe;
1480 	}
1481 
1482 	set_thread_flag(TIF_RESTOREALL);
1483 	return 0;
1484 
1485 badframe:
1486 	if (show_unhandled_signals)
1487 		printk_ratelimited(KERN_INFO
1488 				   "%s[%d]: bad frame in sys_sigreturn: "
1489 				   "%p nip %08lx lr %08lx\n",
1490 				   current->comm, current->pid,
1491 				   addr, regs->nip, regs->link);
1492 
1493 	force_sig(SIGSEGV, current);
1494 	return 0;
1495 }
1496