// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>

#include "signal.h"


#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

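/*
 * The user-stack trampoline built by setup_trampoline() below is TRAMP_SIZE
 * words: a bctrl to enter the handler (handle_rt_signal64() points CTR at
 * it), an addi to pop the dummy stack frame, then li r0,<syscall>; sc to
 * invoke rt_sigreturn.  The remaining words, starting at TRAMP_TRACEBACK,
 * are zeroed as minimal traceback info.
 */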
#define TRAMP_TRACEBACK	4
#define TRAMP_SIZE	7

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */
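/*
 * On entry to the handler, r1 points at the dummy caller frame, which is
 * back-chained to the original user stack pointer (see the newsp setup in
 * handle_rt_signal64()), with the rt_sigframe sitting immediately above it.
 */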

struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));

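/*
 * Minimum stack space a signal delivery can consume: the rt_sigframe itself
 * plus the dummy caller frame.  Reported to userspace via AT_MINSIGSTKSZ.
 */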
unsigned long get_min_sigframe_size_64(void)
{
	return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
}

/*
 * This computes a quad word aligned pointer inside the vmx_reserve array
 * element. For historical reasons sigcontext might not be quad word aligned,
 * but the location we write the VMX regs to must be. See the comment in
 * sigcontext for more detail.
 */
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif

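/*
 * Flush the task's live FP/VMX/VSX register state into the thread_struct
 * (and snapshot VRSAVE) so that the unsafe_* helpers below can copy it into
 * the signal frame.  Must be called before the uaccess window is opened
 * (see handle_rt_signal64() and sys_swapcontext()).
 */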
static void prepare_setup_sigcontext(struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (tsk->thread.used_vr)
		flush_altivec_to_thread(tsk);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif /* CONFIG_ALTIVEC */

	flush_fp_to_thread(tsk);

#ifdef CONFIG_VSX
	if (tsk->thread.used_vsr)
		flush_vsx_to_thread(tsk);
#endif /* CONFIG_VSX */
}

/*
 * Set up the sigcontext for the signal frame.
 */

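/*
 * The unsafe_* setup helpers below assume the caller has already opened a
 * user write access window with user_write_access_begin(); on any access
 * fault they branch to the supplied error label.
 */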
#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
do {										\
	if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
		goto label;							\
} while (0)
static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
					struct task_struct *tsk, int signr, sigset_t *set,
					unsigned long handler, int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = regs->msr;
	/* Force user to always see softe as 1 (interrupts enabled) */
	unsigned long softe = 0x1;

	BUG_ON(tsk != current);

#ifdef CONFIG_ALTIVEC
	unsafe_put_user(v_regs, &sc->v_regs, efault_out);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
				    33 * sizeof(vector128), efault_out);
		/* set MSR_VEC in the MSR value in the frame to indicate that
		 * sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
	unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
	/* copy fpr regs and fpscr */
	unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
		v_regs += ELF_NVRREG;
		unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
	unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
	unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
	unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
	unsafe_put_user(signr, &sc->signal, efault_out);
	unsafe_put_user(handler, &sc->handler, efault_out);
	if (set != NULL)
		unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);

	return 0;

efault_out:
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct task_struct *tsk,
				 int signr, sigset_t *set, unsigned long handler,
				 unsigned long msr)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	long err = 0;

	BUG_ON(tsk != current);

	BUG_ON(!MSR_TM_ACTIVE(msr));

	WARN_ON(tm_suspend_disabled);

	/* Restore checkpointed FP, VEC, and VSX bits from ckpt_regs as
	 * it contains the correct FP, VEC, VSX state after we treclaimed
	 * the transaction and giveup_all() was called on reclaiming.
	 */
	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.vr_state,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.ckvr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(tsk->thread.ckvrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
	if (msr & MSR_FP)
		err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
	else
		err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_ckvsx_to_user(v_regs, tsk);

		if (msr & MSR_VSX)
			err |= copy_vsx_to_user(tm_v_regs, tsk);
		else
			err |= copy_ckvsx_to_user(tm_v_regs, tsk);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &tsk->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif

/*
 * Restore the sigcontext from the signal frame.
 */
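/*
 * As with the setup path, the unsafe_* restore helpers expect the caller to
 * have opened the window with user_read_access_begin() and to pass a label
 * to branch to on an access fault.
 */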
#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do {	\
	if (__unsafe_restore_sigcontext(tsk, set, sig, sc))		\
		goto label;						\
} while (0)
static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
						int sig, struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long save_r13 = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
	unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
	/* get MSR separately, transfer the LE bit if doing signal return */
	unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
	unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
	unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
	unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
	unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
	unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
	/* Don't allow userspace to set SOFTE */
	set_trap_norestart(regs);
	unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
	unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
	unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);

	/*
	 * Force reload of FP/VEC/VSX so userspace sees any changes.
	 * Clear these bits from the user process' MSR before copying into the
	 * thread struct. If we are rescheduled or preempted and another task
	 * uses FP/VEC/VSX, and this process has the MSR bits set, then the
	 * context switch code will save the current CPU state into the
	 * thread_struct - possibly overwriting the data we are updating here.
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

#ifdef CONFIG_ALTIVEC
	unsafe_get_user(v_regs, &sc->v_regs, efault_out);
	if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0) {
		unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
				      33 * sizeof(vector128), efault_out);
		tsk->thread.used_vr = true;
	} else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
	else
		tsk->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0) {
		unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32 ; i++)
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
	}
#endif
	return 0;

efault_out:
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct task_struct *tsk,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	if (tm_suspend_disabled)
		return -EINVAL;

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* Don't allow reserved mode. */
	if (MSR_TM_RESV(msr))
		return -EINVAL;

	/* pull in MSR LE from user context */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(tsk->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(tsk->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(tsk->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(tsk->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);
	/* Don't allow userspace to set SOFTE */
	set_trap_norestart(regs);
	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into tsk->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
					33 * sizeof(vector128));
		current->thread.used_vr = true;
	}
	else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(tsk->thread.ckvrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	}
	else {
		tsk->thread.vrsave = 0;
		tsk->thread.ckvrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
	err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(tsk, tm_v_regs);
		err |= copy_ckvsx_from_user(tsk, v_regs);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32 ; i++) {
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* Make sure the transaction is marked as failed */
	tsk->thread.tm_texasr |= TEXASR_FS;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/* pull in MSR TS bits from user context */
	regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));

	/*
	 * Ensure that TM is enabled in regs->msr before we leave the signal
	 * handler. It could be the case that (a) user disabled the TM bit
	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
	 * TM bit was disabled because a sufficient number of context switches
	 * happened whilst in the signal handler and load_tm overflowed,
	 * disabling the TM bit. In either case we can end up with an illegal
	 * TM state leading to a TM Bad Thing when we return to userspace.
	 *
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 */
	regs_set_return_msr(regs, regs->msr | MSR_TM);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&tsk->thread);

	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&tsk->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&tsk->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();

	return err;
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
	return -EINVAL;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* Call the handler and pop the dummy stack frame */
	err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
	err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);

	err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
	err |= __put_user(PPC_RAW_SC(), &tramp[3]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
				   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))
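/* The 32*sizeof(long) above is the 32 VSX low doublewords (vsr0..31) that
 * follow the VMX registers when the context carries VSX state.
 */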

/*
 * Handle {get,set,swap}_context operations
 */
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		struct ucontext __user *, new_ctx, long, ctx_size)
{
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bit but
	 * doesn't provide room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		prepare_setup_sigcontext(current);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;

		unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
					0, ctx_has_vsx_region, efault_out);
		unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
				    sizeof(sigset_t), efault_out);

		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_readable((char __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}
	set_current_blocked(&set);

	if (!user_read_access_begin(new_ctx, ctx_size))
		return -EFAULT;
	if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
		user_read_access_end();
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}
	user_read_access_end();

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);

	return 0;

efault_out:
	user_write_access_end();
	return -EFAULT;
}


/*
 * Do a signal return; undo the signal stack.
 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
	unsigned long msr;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(uc, sizeof(*uc)))
		goto badframe;

	if (__get_user_sigset(&set, &uc->uc_sigmask))
		goto badframe;
	set_current_blocked(&set);

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
		/*
		 * If there is a transactional state then throw it away.
		 * The purpose of a sigreturn is to destroy all traces of the
		 * signal frame, and this includes any transactional state
		 * created within it.  We only check for suspended because we
		 * can never be transactionally active in the kernel; if we
		 * somehow are, there is nothing better to do than go ahead
		 * and take the Bad Thing later.
		 * The cause is not important as there will never be a
		 * recheckpoint so it's not user visible.
		 */
		if (MSR_TM_SUSPENDED(mfmsr()))
			tm_reclaim_current(0);

		/*
		 * Disable the MSR[TS] bit also, so that if there is an
		 * exception in the code below (such as a page fault in
		 * copy_ckvsx_to_user()), it does not recheckpoint this task
		 * if there was a context switch inside the exception.
		 *
		 * A major page fault can indirectly call schedule().
		 * Rescheduling a process in the middle of an exception can
		 * have a side effect (changing the CPU MSR[TS] state), since
		 * schedule() is called with the CPU MSR[TS] disabled and
		 * returns with MSR[TS]=Suspended (switch_to() calls
		 * tm_recheckpoint() for the 'new' process).  In this case,
		 * the process continues to be the same in the CPU, but the
		 * CPU state just changed.
		 *
		 * This can cause a TM Bad Thing, since the MSR in the stack
		 * will have MSR[TS]=0, and this is what will be used to RFID.
		 *
		 * Clearing the MSR[TS] state here will avoid a recheckpoint if
		 * there is any process reschedule in kernel space.  The MSR[TS]
		 * state does not need to be saved either, since it will be
		 * replaced with the MSR[TS] that came from user context later,
		 * at restore_tm_sigcontexts().
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);

		if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
			goto badframe;
	}

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;

		/* Trying to start TM on non TM system */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;

		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	} else {
		/*
		 * Fall through, for non-TM restore
		 *
		 * Unset MSR[TS] on the thread regs since the MSR from user
		 * context does not have MSR active, and recheckpoint was
		 * not called since restore_tm_sigcontexts() was not called
		 * either.
		 *
		 * If it is not unset, the code can RFID to userspace with
		 * MSR[TS] set, but without the CPU in the proper state,
		 * causing a TM Bad Thing.
		 */
		regs_set_return_msr(current->thread.regs,
				    current->thread.regs->msr & ~MSR_TS_MASK);
		if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
			goto badframe;

		unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
					  badframe_block);

		user_read_access_end();
	}

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);

	return 0;

badframe_block:
	user_read_access_end();
badframe:
	signal_fault(current, regs, "rt_sigreturn", uc);

	force_sig(SIGSEGV);
	return 0;
}

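/*
 * Lay out the rt signal frame on the user stack and update the saved
 * registers so that the return to userspace enters the signal handler with
 * the arguments expected by the ABI in use (ELFv1 function descriptor or
 * ELFv2 entry point in r12).
 */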
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);

	/*
	 * This only applies when calling unsafe_setup_sigcontext() and must be
	 * called before opening the uaccess window.
	 */
	if (!MSR_TM_ACTIVE(msr))
		prepare_setup_sigcontext(tsk);

	if (!user_write_access_begin(frame, sizeof(*frame)))
		goto badframe;

	unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
	unsafe_put_user(&frame->uc, &frame->puc, badframe_block);

	/* Create the ucontext.  */
	unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);

		user_write_access_end();

		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler,
					    msr);

		if (!user_write_access_begin(&frame->uc.uc_sigmask,
					     sizeof(frame->uc.uc_sigmask)))
			goto badframe;

#endif
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
		unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
					NULL, (unsigned long)ksig->ka.sa.sa_handler,
					1, badframe_block);
	}

	unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
	user_write_access_end();

	/* Save the siginfo outside of the unsafe block. */
	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	tsk->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (tsk->mm->context.vdso) {
		regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	if (is_elf2_task()) {
		regs->ctr = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->ctr;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine.  The first entry in the function
		 * descriptor is the entry address of signal and the second
		 * entry is the TOC value we need to use.
		 */
		struct func_desc __user *ptr =
			(struct func_desc __user *)ksig->ka.sa.sa_handler;

		err |= get_user(regs->ctr, &ptr->addr);
		err |= get_user(regs->gpr[2], &ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->gpr[4] = (unsigned long)&frame->info;
		regs->gpr[5] = (unsigned long)&frame->uc;
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe_block:
	user_write_access_end();
badframe:
	signal_fault(current, regs, "handle_rt_signal64", frame);

	return 1;
}