/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_swapcontext	compat_sys_swapcontext
#define sys_sigreturn	compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
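/*
 * Illustrative note (the exact sizes depend on the ABI headers, so
 * treat the arithmetic as an assumption): a pre-VSX userspace passes a
 * ucontext that stops right before the trailing elf_vsrreghalf_t32
 * block, so sys_swapcontext() below accepts any ctx_size of at least
 * UCONTEXTSIZEWITHOUTVSX and only trusts VSX state when ctx_size
 * covers the full structure.
 */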

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
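/*
 * The min() above means we copy only the area the user-visible
 * elf_gregset_t32 and the kernel's pt_regs32 have in common, in case
 * the two layouts ever differ in size.
 */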
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}
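/*
 * Worked example (assuming _NSIG_WORDS == 1, i.e. a 64-signal set held
 * in one 64-bit word): set->sig[0] == 0x0000000100000200ULL becomes
 * cset.sig[0] == 0x00000200 (low half) and cset.sig[1] == 0x00000001
 * (high half), which is the word order 32-bit userspace expects.  The
 * case fall-through in the switch above is intentional: starting at
 * the highest word present, every lower word is converted too.
 */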

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
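/*
 * PT_MSR and PT_SOFTE are deliberately skipped above: the MSR carries
 * privileged state (only selected bits, such as MSR_LE, are merged
 * back elsewhere) and SOFTE is the kernel's soft-interrupt-enable
 * word, neither of which userspace should be able to set directly.
 */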

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
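/*
 * Resulting user-stack picture for a non-RT signal, derived from the
 * layout comment above (higher addresses at the top):
 *
 *	original r1 --->  ------------------------------
 *	                  abigap (56 words, xcoff spill)
 *	                  mctx   (register values)
 *	                  sctx   (the sigcontext)
 *	new r1 -------->  gap of __SIGNAL_FRAMESIZE bytes
 */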

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
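/*
 * The RT variant differs from struct sigframe above in that siginfo
 * and a full ucontext (two of them under TM) replace the bare
 * sigcontext/mcontext pair; handle_rt_signal32() below places the new
 * stack pointer at rt_sf - (__SIGNAL_FRAMESIZE + 16) to preserve the
 * historical offsets mentioned in the comment.
 */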

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NFPREG];
	int i;

	/* copy the FPRs from the thread_struct to a local buffer,
	 * then write that buffer out to userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
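/*
 * Buffer layout (assuming ELF_NFPREG counts the 32 FPRs plus FPSCR):
 * buf[0..ELF_NFPREG-2] hold FPR0..FPR31 and the final slot holds the
 * FPSCR, which is why the loop stops at ELF_NFPREG - 1 and the FPSCR
 * is appended with memcpy().
 */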

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	/* copy the VSR halves from the thread_struct to a local buffer,
	 * then write that buffer out to userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_transact_fpr_to_user(void __user *to,
				  struct task_struct *task)
{
	double buf[ELF_NFPREG];
	int i;

	/* copy the transactional FPRs from the thread_struct to a local
	 * buffer, then write that buffer out to userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_TRANS_FPR(i);
	memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_transact_fpr_from_user(struct task_struct *task,
					  void __user *from)
{
	double buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_TRANS_FPR(i) = buf[i];
	memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));

	return 0;
}

unsigned long copy_transact_vsx_to_user(void __user *to,
				  struct task_struct *task)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	/* copy the transactional VSR halves from the thread_struct to a
	 * local buffer, then write that buffer out to userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_transact_vsx_from_user(struct task_struct *task,
					  void __user *from)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fpr, from,
			      ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_transact_fpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.transact_fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.transact_fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the top 32 bits of the MSR in the tm
	 * frame so that we can check it on the restore to see if TM is
	 * active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

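	/*
	 * Trampoline encoding (for reference): 0x38000000 is the addi
	 * opcode, and with rD = r0, rA = 0 it acts as "li r0,SIMM", so
	 * 0x38000000 + sigret loads the sigreturn syscall number into
	 * r0; 0x44000002 is the "sc" (system call) instruction.
	 */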
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
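/*
 * Note on the two frames below: the checkpointed (pre-transaction)
 * state from ckpt_regs goes into @frame, the live transactional state
 * into @tm_frame; when a unit (FP/VEC/VSX) was not in use inside the
 * transaction, the same non-transactional copy is written to both.
 */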
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   current->thread.transact_vr,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   current->thread.vr,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.transact_vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(current->thread.transact_vr,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr) {
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
		memset(current->thread.transact_vr, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.transact_vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
		    copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);
	/* Get the top half of the MSR */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	/* Pull in MSR TM from user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
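	/*
	 * msr_hi holds the top word of the original 64-bit MSR that
	 * save_tm_user_regs() stashed in the transactional frame, so
	 * shifting it left by 32 realigns the TS (transaction state)
	 * bits before they are merged back into regs->msr.
	 */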

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return 0;
}
#endif

#ifdef CONFIG_PPC64
int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
	int err;

	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(s->si_code >> 16) {
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	memset(to, 0, sizeof(*to));

	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		sigret = 0;
		tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link)
		    || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
			goto badframe;
	}
	else
#endif
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;
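	/*
	 * The put_user above stores the old r1 at *newsp, establishing
	 * the standard PPC stack back-chain so the handler's frame
	 * links back to the interrupted frame.
	 */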

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state:
	 */
	regs->msr &= ~MSR_TS_MASK;
#endif
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bits but
	 * doesn't provide room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
	int tm_restore = 0;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* We only recheckpoint on return if we're
			 * actually in a transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore)
		/* Fall through, for non-TM restore */
#endif
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;

	/* Set up Signal Frame */
	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		sigret = 0;
		tramp = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state:
	 */
	regs->msr &= ~MSR_TS_MASK;
#endif
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   frame, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1575