// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include <asm/syscalls_32.h>
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"

#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
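
/*
 * compat_sys_swapcontext() below checks the user-supplied context size
 * against this value, so old ucontexts that lack the VSX region are
 * still accepted.
 */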

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
#define unsafe_put_sigset_t	unsafe_put_compat_sigset
#define unsafe_get_sigset_t	unsafe_get_compat_sigset

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)
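
/*
 * On a compat (64-bit) kernel the user pointers in the 32-bit frame are
 * 32 bits wide; ptr_to_compat()/compat_ptr() narrow and widen them so
 * the code below is shared between native 32-bit and compat tasks.
 */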

static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

#define unsafe_put_sigset_t(uset, set, label) do {			\
	sigset_t __user *__us = uset;					\
	const sigset_t *__s = set;					\
									\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
} while (0)

#define unsafe_get_sigset_t	unsafe_get_user_sigset

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}

static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);

	return 0;

failed:
	return 1;
}
#endif

#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)
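
/*
 * These wrappers convert the 0/1 return of the helpers above into the
 * goto-on-failure style used by the other unsafe_*() accessors inside
 * user_access_begin()/user_access_end() sections.
 */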

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
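
/*
 * A rough sketch of the resulting user stack, addresses decreasing from
 * top to bottom (the *_transact members exist only on TM configs):
 *
 *	old GPR1 ->	caller's stack frame
 *			abigap[56]			ABI register save area
 *			mctx_transact, sctx_transact	(TM only)
 *			mctx				saved register state
 *	frame ->	sctx
 *			gap of __SIGNAL_FRAMESIZE bytes
 *	new GPR1 ->	back chain to old GPR1
 */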

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

unsigned long get_min_sigframe_size_32(void)
{
	return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
		   sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
}
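
/*
 * This helper feeds the minimum signal stack size reported to userspace
 * (the AT_MINSIGSTKSZ aux vector entry): the larger of the two frame
 * layouts above, plus its mandatory gap.
 */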

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
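
/*
 * The flush_*_to_thread() calls above make the task give up the relevant
 * unit so that its live FP/VMX/VSX/SPE register contents are written back
 * into current->thread; the unsafe_* copies below then read a coherent
 * snapshot from there rather than stale memory.
 */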

static __always_inline int
__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 to the top 32 bits of the MSR in the tm frame
	 * so that we can check it on restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}

#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
	WARN_ON(tm_suspend_disabled);

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
}

static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
			 struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
#else
static void prepare_save_tm_user_regs(void) { }

static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
			 struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
#endif

#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * restore general registers, not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	set_trap_norestart(regs);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSR 0-31 doubleword 1 state from the stack
		 * to a local buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

#ifdef CONFIG_SPE
	/*
	 * Force the process to reload the spe registers from
	 * current->thread when it next does spe instructions.
	 * Since this is user ABI, we must enforce the sizing.
	 */
	BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
				      sizeof(current->thread.spe), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(&current->thread.spe, 0, sizeof(current->thread.spe));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
	int i;

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers, not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);

	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
	if (msr & MSR_VSX) {
		/*
		 * Restore the checkpointed VSR 0-31 doubleword 1 state from
		 * the stack to a local buffer, then write this out to the
		 * thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}

	user_read_access_end();

	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

	/* restore altivec registers from the stack */
	if (msr & MSR_VEC)
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

	if (msr & MSR_VSX) {
		/*
		 * Restore the transactional VSR 0-31 doubleword 1 state from
		 * the stack to a local buffer, then write this out to the
		 * thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();

	return 0;

failed:
	user_read_access_end();
	return 1;
}
#else
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Set up the sigreturn trampoline: in the vDSO if mapped, else on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
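		/*
		 * No vDSO mapped: place a two-instruction trampoline in the
		 * frame's mc_pad words instead, equivalent to
		 *	li	r0, __NR_rt_sigreturn
		 *	sc
		 * The dcbst/icbi sequence below pushes the newly written
		 * instructions out of the data cache and invalidates the
		 * stale instruction cache line so they can be fetched and
		 * executed.
		 */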
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	/* Set up the sigreturn trampoline, as in handle_rt_signal32() */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));

	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
#endif

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it claims VSX state (MSR VSX bits
	 * set) but is too small to actually hold that state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_readable((char __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}
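
/*
 * A sketch of how userspace might drive this syscall (illustrative only,
 * not taken from this file): save the current context into @old and
 * resume from a previously captured @new in a single call:
 *
 *	struct ucontext old, new;
 *	... fill "new" with a previously saved context ...
 *	swapcontext(&old, &new, sizeof(struct ucontext));
 *
 * Passing a NULL new_ctx just saves the current context, and a NULL
 * old_ctx just restores new_ctx.
 */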

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended, as we can never be
	 * active in the kernel; if we somehow were, there is nothing
	 * better to do than go ahead and hit the Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi << 32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're in a
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
	if (MSR_TM_ACTIVE(msr_hi << 32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);

			force_sig(SIGSEGV);
			return 0;
		}
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);

	force_sig(SIGSEGV);
	return 0;
}
1360