xref: /openbmc/linux/arch/mips/kernel/signal.c (revision d02a40af)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>

#include "signal-common.h"

static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

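/*
 * Layouts of the frames built on the user stack when a signal is
 * delivered: 'sigframe' for traditional signals, 'rt_sigframe' for RT
 * signals carrying siginfo and a full ucontext.
 */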
struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;
}

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _restore_fp_context(fpregs, csr);
}

/*
 * Helper routines
 */
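/*
 * Save the FPU state to the user sigcontext.  If the thread owns the
 * FPU, use the installed save_fp_context handler (hardware path where
 * available); otherwise copy the software-saved FP state.  A fault
 * while writing the sigcontext is handled by touching it and retrying.
 */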
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err;

	used = !!used_math();
	err = __put_user(used, used_math);
	if (err || !used)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err;
}

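/*
 * Restore the FPU state from the user sigcontext, mirroring
 * protected_save_fp_context().  Also checks the user FCSR via
 * fpcsr_pending() and returns SIGFPE if the signal handler left
 * enabled FP exceptions raised.
 */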
int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !used) {
		lose_fpu(0);
		return err;
	}

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

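/*
 * Save the CPU state (general purpose registers, PC, HI/LO and DSP
 * state where present) plus the FPU state into the user sigcontext,
 * so the signal handler starts with the interrupted thread's context.
 */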
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}

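/*
 * Check the user FCSR for exception cause bits that are both raised
 * and enabled (or an unimplemented-operation condition).  If any are
 * set, clear them in the sigcontext and return SIGFPE so the caller
 * can deliver it; returns 0 if nothing is pending, or a negative
 * error on a faulting sigcontext.
 */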
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

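/*
 * Restore the CPU and FPU state from the user sigcontext on sigreturn.
 * Returns a negative error, 0 on success, or a signal number (SIGFPE)
 * to be forced on the task.
 */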
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}

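/*
 * Determine where to place the signal frame: normally just below the
 * interrupted $sp (skipping any FPU emulator trampoline), or on the
 * alternate signal stack if SA_ONSTACK is in effect, aligned downward.
 */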
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16-byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
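/*
 * sys_sigreturn: undo what setup_frame() did.  Restore the blocked
 * signal mask and the saved register context from the sigframe the
 * handler just returned through, then resume at syscall_exit.
 */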
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

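/*
 * sys_rt_sigreturn: as above, but for RT signal frames; additionally
 * restores the alternate signal stack settings from the ucontext.
 */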
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}

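/*
 * setup_frame()/setup_rt_frame(): build the signal frame on the user
 * stack, save the interrupted context and signal mask into it, and
 * rewrite the registers so execution resumes in the handler, with $ra
 * pointing at the sigreturn trampoline in the VDSO.
 */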
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to struct siginfo
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

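/*
 * Native ABI description: frame setup callbacks, sigreturn trampoline
 * offsets within the VDSO, the restart syscall number, and the offsets
 * of the FP fields within struct sigcontext used by the helpers above.
 */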
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame = setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};

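/*
 * Deliver one signal: fix up the interrupted syscall for restart
 * semantics, then build either a traditional or an RT signal frame and
 * point the thread at its handler via the matching VDSO trampoline.
 */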
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
	void *vdso;
	unsigned long tmp = (unsigned long)current->mm->context.vdso;

	set_isa16_mode(tmp);
	vdso = (void *)tmp;
#else
	void *vdso = current->mm->context.vdso;
#endif

	if (regs->regs[0]) {
		switch(regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
		/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;		/* Don't deal with this again.	*/
	}

	if (sig_uses_siginfo(&ksig->ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
				       regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

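/*
 * Check for a pending signal and deliver it; otherwise handle syscall
 * restarting and put the saved signal mask back.
 */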
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

/*
 * Notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

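/*
 * Select the FP context save/restore implementations at boot: use the
 * hardware/assembly paths when an FPU is present, or plain copies of
 * the software FP state otherwise.  On SMP the check is deferred to
 * call time via the smp_*_fp_context() wrappers.
 */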
static int signal_setup(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */

	return 0;
}

arch_initcall(signal_setup);