xref: /openbmc/linux/arch/arm/kernel/signal.c (revision e8e0929d)
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>

#include "ptrace.h"
#include "signal.h"

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
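
/*
 * Informal sketch of sigreturn_codes[]; setup_return() below does the real
 * indexing:
 *
 *   [0] mov r7, #__NR_sigreturn       (EABI wants the number in r7)
 *   [1] swi #__NR_sigreturn           (OABI encodes it in the SWI itself)
 *   [2] movs r7, #nr + svc #0         (two Thumb halfwords in one word)
 *   [3]-[5] the same three entries for rt_sigreturn
 *
 * setup_return() starts at index (thumb << 1), adding 3 for SA_SIGINFO
 * frames, and points the return address either at the copy of this code in
 * the high vectors page (KERN_SIGRETURN_CODE) or at a copy written onto the
 * user stack for tasks not running in a 32-bit mode.
 */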

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
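
/*
 * Note on the saved_sigmask handling above: the caller's mask is stashed in
 * current->saved_sigmask and TIF_RESTORE_SIGMASK is set.  On the way back to
 * user space, do_signal() either builds a signal frame using saved_sigmask
 * (so sigreturn restores it) or, if no handler runs, puts it back directly.
 */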

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
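
/*
 * sys_sigaction() is the legacy, non-rt interface: old_sigset_t is a single
 * word, so only the first word of the signal mask (sig[0]) is copied in and
 * out; the signals beyond that word are only reachable via rt_sigaction.
 */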

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
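
/*
 * Both the Crunch helpers above and the iWMMXt helpers below use the same
 * trick: an over-sized on-stack buffer, rounded up to the next 8-byte
 * boundary, gives the coprocessor copy routines the 64-bit alignment they
 * need, and the result is then moved to or from user space in one go.
 */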

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

/*
 * Do a signal return; undo the signal stack.  These frames are 64-bit aligned.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
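
/*
 * Rough picture of what setup_frame()/setup_rt_frame() push onto the user
 * stack (lowest address first); rt frames simply prepend the siginfo:
 *
 *	[ siginfo    ]  rt frames only
 *	[ ucontext   ]  uc_mcontext holds the integer registers,
 *	                uc_regspace holds the aux (coprocessor) area
 *	[ retcode[2] ]  sigreturn trampoline, unless SA_RESTORER is used
 *
 * The whole frame is placed on an 8-byte boundary by get_sigframe().
 */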

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_restore_state(&sf->aux.vfp);
#endif

	return err;
}

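/*
 * sys_sigreturn() and sys_rt_sigreturn() are reached when the signal handler
 * returns through the trampoline set up by setup_return(): the trampoline
 * issues the sigreturn syscall with the original frame still on the stack,
 * so all we have to do here is unpick it again.
 */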
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * 'sp' must still be 64-bit aligned here.  If it's
	 * not, the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	single_step_trap(current);

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * 'sp' must still be 64-bit aligned here.  If it's
	 * not, the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	single_step_trap(current);

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_save_state(&sf->aux.vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
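
/*
 * Note: the zero written to aux->end_magic above marks the end of the
 * coprocessor save records placed in uc_regspace, so anything walking the
 * aux area knows where the valid data stops.
 */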

static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
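
/*
 * Illustrative (hypothetical) user-space setup that exercises the SA_ONSTACK
 * path in get_sigframe() above, using the standard sigaltstack() and
 * sigaction() wrappers:
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_sigaction = handler,
 *		.sa_flags     = SA_ONSTACK | SA_SIGINFO,
 *	};
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * Only when SA_ONSTACK is set and the task is not already running on the
 * alternate stack does the frame get built at sas_ss_sp + sas_ss_size.
 */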

static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
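
/*
 * To summarise the three return paths chosen by setup_return() above:
 *
 *   1. SA_RESTORER: lr points at the restorer supplied by user space
 *      (typically the C library's own sigreturn stub).
 *   2. 32-bit mode tasks: lr points into the kernel-provided copy of
 *      sigreturn_codes[] in the high page at KERN_SIGRETURN_CODE, so no
 *      code has to be executed from the stack.
 *   3. Everything else: the trampoline is written into retcode[] on the
 *      stack and the I-cache is flushed so the CPU sees it; lr gets the
 *      stack address (plus the Thumb bit when appropriate).
 */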

static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}

static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

static inline void setup_syscall_restart(struct pt_regs *regs)
{
	regs->ARM_r0 = regs->ARM_ORIG_r0;
	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
}
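
/*
 * setup_syscall_restart() rewinds the PC by the size of one SWI/SVC
 * instruction (2 bytes in Thumb mode, 4 in ARM mode) and puts the original
 * first argument back in r0 (the syscall's return value has overwritten it),
 * so the same system call is re-issued when the task resumes.
 */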

/*
 * OK, we're invoking a handler
 */
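/*
 * Roughly: fix up the syscall return value for restart semantics, translate
 * the signal number through the exec domain if needed, build the frame on
 * the user stack, and finally block the signal (plus sa_mask) while the
 * handler runs, unless SA_NODEFER was given.
 */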
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			setup_syscall_restart(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return ret;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

	return 0;
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs, int syscall)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (try_to_freeze())
		goto no_signal;

	single_step_clear(current);

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		sigset_t *oldset;

		if (test_thread_flag(TIF_RESTORE_SIGMASK))
			oldset = &current->saved_sigmask;
		else
			oldset = &current->blocked;
		if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}
		single_step_set(current);
		return;
	}

 no_signal:
	/*
	 * No signal to deliver to the process - restart the syscall.
	 */
	if (syscall) {
		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
			if (thumb_mode(regs)) {
				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
				regs->ARM_pc -= 2;
			} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 4;
#else
				u32 __user *usp;
				u32 swival = __NR_restart_syscall;

				regs->ARM_sp -= 12;
				usp = (u32 __user *)regs->ARM_sp;

				/*
				 * Either we support OABI only, or we have
				 * EABI with the OABI compat layer enabled.
				 * In the latter case we don't know if user
				 * space is EABI or not, and if not we must
				 * not clobber r7.  Always using the OABI
				 * syscall solves that issue and works for
				 * all those cases.
				 */
				swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;

				put_user(regs->ARM_pc, &usp[0]);
				/* swi __NR_restart_syscall */
				put_user(0xef000000 | swival, &usp[1]);
				/* ldr	pc, [sp], #12 */
				put_user(0xe49df00c, &usp[2]);

				flush_icache_range((unsigned long)usp,
						   (unsigned long)(usp + 3));

				regs->ARM_pc = regs->ARM_sp + 4;
#endif
			}
		}
		if (regs->ARM_r0 == -ERESTARTNOHAND ||
		    regs->ARM_r0 == -ERESTARTSYS ||
		    regs->ARM_r0 == -ERESTARTNOINTR) {
			setup_syscall_restart(regs);
		}

		/* If there's no signal to deliver, we just put the saved sigmask
		 * back.
		 */
		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
			clear_thread_flag(TIF_RESTORE_SIGMASK);
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
	}
	single_step_set(current);
}
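
/*
 * do_notify_resume() below is the hook called from the entry assembly on the
 * return-to-user path when work flags (TIF_SIGPENDING, TIF_NOTIFY_RESUME)
 * are set; it dispatches to do_signal() above for pending signals.
 */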

asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(regs, syscall);

	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}