// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
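
/*
 * Editorial sketch (not in the original source): how a caller can combine
 * UCONTEXTSIZEWITHOUTVSX with the MSR_VSX bit to validate a user-supplied
 * context size, mirroring the checks done in the swapcontext handler
 * further down.  The helper name is hypothetical.
 */
static inline bool __maybe_unused ucontext_size_valid(size_t ctx_size,
						      unsigned long new_msr)
{
	/* Must cover at least the pre-VSX layout... */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return false;
	/* ...and the full layout whenever MSR says VSX state is present. */
	if ((new_msr & MSR_VSX) && ctx_size < sizeof(struct ucontext))
		return false;
	return true;
}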

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
#define unsafe_put_sigset_t	unsafe_put_compat_sigset

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static __always_inline int
save_general_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

#define unsafe_put_sigset_t(uset, set, label) do {			\
	sigset_t __user *__us = uset;					\
	const sigset_t *__s = set;					\
									\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
} while (0)

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static __always_inline int
save_general_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (save_general_regs_unsafe(regs, frame))	\
		goto label;					\
} while (0)
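
/*
 * Usage sketch (editorial, illustrative only): the unsafe_* helpers above
 * may only run inside an open user write access window.  The callers below
 * all follow this shape:
 *
 *	if (!user_write_access_begin(frame, sizeof(*frame)))
 *		goto badframe;
 *	unsafe_save_general_regs(regs, frame, failed);
 *	...more unsafe_* accessors...
 *	user_write_access_end();
 *	return 0;
 * failed:
 *	user_write_access_end();
 * badframe:
 *	return 1;
 */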

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
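
/*
 * Editorial sketch of the resulting user stack for a non-RT signal
 * (addresses decrease downwards; see the layout comment above):
 *
 *	old r1 --->	[ caller's stack frames        ]
 *			[ abigap: 56 words             ]
 *			[ mctx: saved register values  ]
 *	frame ---->	[ sctx: sigcontext, handler r4 ]
 *			[ __SIGNAL_FRAMESIZE gap       ]
 *	new r1 --->	[ back chain to old r1         ]
 */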

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}

static int save_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				 struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 to the MSR top 32 bits in the tm frame so that
	 * we can check it on restore to see if TM is active.
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}

#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (save_user_regs_unsafe(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)
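
/*
 * Editorial note: prepare_save_user_regs() must run before
 * user_write_access_begin(), because the flush_*_to_thread() helpers are
 * ordinary functions that must not run while the user access window
 * (KUAP) is open; only unsafe_* accessors may run inside the window.
 * A typical caller (cf. handle_rt_signal32() below) therefore does:
 *
 *	prepare_save_user_regs(1);
 *	if (!user_write_access_begin(frame, sizeof(*frame)))
 *		goto badframe;
 *	unsafe_save_user_regs(regs, mctx, NULL, 1, failed);
 *	user_write_access_end();
 */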

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs_unsafe() and signal_64.c:setup_tm_sigcontexts().
 */
static void prepare_save_tm_user_regs(void)
{
	WARN_ON(tm_suspend_disabled);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs_unsafe().
	 */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
#else
static void prepare_save_tm_user_regs(void) { }

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
#endif

#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	set_trap_norestart(regs);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}
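
/*
 * Editorial note on the 'sig' argument above: sigreturn passes 1, giving a
 * full restore including the saved little-endian (MSR_LE) bit, while
 * swapcontext passes 0, which keeps the current r2 (TLS pointer) and MSR_LE
 * instead of taking them from the context being restored.
 */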

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context.
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
#endif
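
/*
 * Editorial note on the MSR split used above: the checkpointed mcontext
 * keeps the ordinary low 32 MSR bits in mc_gregs[PT_MSR], while the
 * transactional mcontext's PT_MSR slot holds the top 32 bits (which is
 * where the TS transaction-state field lives).  restore_tm_user_regs()
 * undoes the save-side "msr >> 32" with:
 *
 *	msr_hi <<= 32;
 *	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
 */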

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_write_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Find the sigreturn trampoline: in the vDSO if mapped, else build it on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
				failed);
		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
	}
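	/*
	 * Editorial note: PPC_INST_ADDI is the base opcode of "addi", so
	 * PPC_INST_ADDI + __NR_rt_sigreturn encodes "li r0,__NR_rt_sigreturn"
	 * (li rD,SI being addi rD,0,SI), and PPC_INST_SC is the "sc"
	 * instruction; together they form the two-instruction on-stack
	 * trampoline used when no vDSO is mapped.
	 */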
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_write_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	if (tramp == (unsigned long)mctx->mc_pad)
		flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_write_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}
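
/*
 * Editorial sketch (user-space view, not kernel code): the register setup
 * in handle_rt_signal32() matches the SA_SIGINFO handler ABI, i.e. a
 * handler installed with sigaction() runs as
 *
 *	void handler(int sig, siginfo_t *info, void *ucontext);
 *
 * with sig in r3, info (r4) pointing at frame->info and ucontext (r5)
 * pointing at frame->uc.
 */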

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_write_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
	}
	user_write_access_end();

	if (tramp == (unsigned long)mctx->mc_pad)
		flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));

	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_write_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}
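
/*
 * Editorial note: unlike the RT case, handle_signal32() passes only the
 * signal number (r3) and a pointer to the sigcontext (r4), matching the
 * historical PowerPC extension of the classic "void handler(int sig)"
 * signature.
 */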

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context that sets the MSR VSX bit but
	 * doesn't provide room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}
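
/*
 * Editorial sketch (user-space, illustrative; the wrapper is assumed):
 * a VSX-aware caller passes the full context size, while an old binary
 * passing the smaller pre-VSX size keeps working as long as its context
 * does not set MSR_VSX:
 *
 *	swapcontext(&old, &new, sizeof(new));	// full, VSX-capable size
 *	swapcontext(&old, &new, pre_vsx_size);	// legacy size, no MSR_VSX
 */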

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame; this includes any transactional state created
	 * within it. We only check for suspended, as we can never be
	 * active in the kernel; if we were, there would be nothing better
	 * to do than go ahead and hit the Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi << 32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * in a transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Clear the TS bits in regs->msr because the ucontext MSR TS
		 * is not set and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}

#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
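
/*
 * Editorial sketch (user-space, illustrative; the raw syscall invocation
 * is an assumption): enabling single-stepping through sys_debug_setcontext
 * might look like
 *
 *	struct sig_dbg_op op = {
 *		.dbg_type  = SIG_DBG_SINGLE_STEPPING,
 *		.dbg_value = 1,
 *	};
 *	syscall(__NR_debug_setcontext, ucp, 1, &op);
 */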

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi << 32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", addr);

	force_sig(SIGSEGV);
	return 0;
}
1375