/* arch/powerpc/kernel/signal_32.c */
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
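/*
 * These helpers convert between the kernel's sigset_t and the
 * compat_sigset_t layout that 32-bit userspace expects.
 */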
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force the user to always see SOFTE as 1 (interrupts enabled) */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		/* Skip the non-volatile GPRs (r14-r31) if they weren't saved */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (i == PT_SOFTE) {
			if (__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

#ifdef CONFIG_VSX
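/*
 * With VSX enabled, the FP registers share storage with the low halves
 * of the VSX registers (TS_FPR(i) indexes into fp_state.fpr[i][]), so
 * they are not contiguous in the thread_struct.  The helpers below
 * therefore gather the values into a local buffer before copying to
 * userspace, and scatter them back on the way in.
 */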
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the FPRs to a local buffer, then write that to userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the VSR low halves to a local buffer, then write that to
	 * userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the checkpointed FPRs to a local buffer, then write that to
	 * userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_CKFPR(i);
	buf[i] = task->thread.ckfp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_CKFPR(i) = buf[i];
	task->thread.ckfp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_ckvsx_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the checkpointed VSR low halves to a local buffer, then
	 * write that to userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_ckvsx_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
			      ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_ckfpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
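/*
 * "sigret", when non-zero, is the syscall number to encode into the
 * trampoline at frame->tramp.  If "tm_frame" is supplied, its MSR word
 * is zeroed so the restore path can tell that no transaction was
 * active.  "ctx_has_vsx_region" indicates whether the context is large
 * enough to hold the VSX register halves.
 */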
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 into the MSR word of the tm frame (which holds
	 * the top 32 bits of the 64-bit MSR) so that we can check it on
	 * restore to see whether TM was active.
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
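		/*
		 * PPC_INST_ADDI + sigret encodes "addi r0,r0,sigret", whose
		 * extended mnemonic is "li r0,sigret": it loads the syscall
		 * number into r0, and PPC_INST_SC is the "sc" instruction
		 * that traps back into the kernel.  The icache flush below
		 * makes the freshly written instructions visible to
		 * instruction fetch.
		 */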
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	WARN_ON(tm_suspend_disabled);

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
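/*
 * "sig" is non-zero for a true signal return; in that case the
 * little-endian mode bit is restored from the saved MSR.  When called
 * for swapcontext (sig == 0), r2 (the TLS pointer) is preserved across
 * the restore rather than being taken from the context.
 */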
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * Restore the general registers, excluding MSR and SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal return.
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX register halves from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * Restore the general registers, excluding MSR and SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal return.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX register halves from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}

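/*
 * Fetch the sigmask and the mcontext pointer out of a user ucontext,
 * then restore the blocked signal set and the user registers from it.
 */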
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
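/*
 * Like do_setcontext(), but restores both the checkpointed and the
 * transactional register state from a pair of ucontexts.
 */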
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

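/*
 * The swapcontext syscall: save the current context into *old_ctx (when
 * non-NULL) and switch to *new_ctx (when non-NULL).  On 64-bit kernels,
 * ctx_size is also used to tell whether the supplied context includes
 * room for the VSX state.
 */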
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it claims VSX state (MSR VSX bits
	 * set) but is too small to actually provide it.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand.
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}

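/*
 * Do an rt signal return; undo the signal stack.  If the transactional
 * MSR bits saved in the frame are set, the transactional state is
 * recheckpointed as well.
 */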
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended because we can never be
	 * transactionally active in the kernel; if we somehow were, there
	 * is nothing better to do than go ahead and hit the Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* We only recheckpoint on return if we're
			 * in a transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Clear the MSR TS bits in regs->msr because the ucontext
		 * MSR[TS] is not set and recheckpoint was not called. This
		 * avoids hitting a TM Bad Thing at RFID.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

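/*
 * The debug_setcontext syscall: apply the requested single-stepping /
 * branch-tracing debug settings, then install the supplied context
 * much as a sigreturn would.
 */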
#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
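/*
 * This sets up an old-style (non-RT) frame: a sigcontext plus an
 * mcontext with the register state, placed directly on the user stack.
 */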
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1522