xref: /openbmc/linux/kernel/signal.c (revision 7dd65feb)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33 
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"	/* audit_signal_info() */
39 
40 /*
41  * SLAB caches for signal bits.
42  */
43 
44 static struct kmem_cache *sigqueue_cachep;
45 
46 int print_fatal_signals __read_mostly;
47 
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50 	return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52 
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55 	/* Is it explicitly or implicitly ignored? */
56 	return handler == SIG_IGN ||
57 		(handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59 
60 static int sig_task_ignored(struct task_struct *t, int sig,
61 		int from_ancestor_ns)
62 {
63 	void __user *handler;
64 
65 	handler = sig_handler(t, sig);
66 
67 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68 			handler == SIG_DFL && !from_ancestor_ns)
69 		return 1;
70 
71 	return sig_handler_ignored(handler, sig);
72 }
73 
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76 	/*
77 	 * Blocked signals are never ignored, since the
78 	 * signal handler may change by the time it is
79 	 * unblocked.
80 	 */
81 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82 		return 0;
83 
84 	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85 		return 0;
86 
87 	/*
88 	 * Tracers may want to know about even ignored signals.
89 	 */
90 	return !tracehook_consider_ignored_signal(t, sig);
91 }
92 
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99 	unsigned long ready;
100 	long i;
101 
102 	switch (_NSIG_WORDS) {
103 	default:
104 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 			ready |= signal->sig[i] &~ blocked->sig[i];
106 		break;
107 
108 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109 		ready |= signal->sig[2] &~ blocked->sig[2];
110 		ready |= signal->sig[1] &~ blocked->sig[1];
111 		ready |= signal->sig[0] &~ blocked->sig[0];
112 		break;
113 
114 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115 		ready |= signal->sig[0] &~ blocked->sig[0];
116 		break;
117 
118 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119 	}
120 	return ready != 0;
121 }
122 
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124 
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127 	if (t->signal->group_stop_count > 0 ||
128 	    PENDING(&t->pending, &t->blocked) ||
129 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130 		set_tsk_thread_flag(t, TIF_SIGPENDING);
131 		return 1;
132 	}
133 	/*
134 	 * We must never clear the flag in another thread, or in current
135 	 * when it's possible the current syscall is returning -ERESTART*.
136 	 * So we don't clear it here; only callers who know they should clear it do so.
137 	 */
138 	return 0;
139 }
140 
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current, the wakeup is a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147 	if (recalc_sigpending_tsk(t))
148 		signal_wake_up(t, 0);
149 }
150 
151 void recalc_sigpending(void)
152 {
153 	if (unlikely(tracehook_force_sigpending()))
154 		set_thread_flag(TIF_SIGPENDING);
155 	else if (!recalc_sigpending_tsk(current) && !freezing(current))
156 		clear_thread_flag(TIF_SIGPENDING);
157 
158 }
159 
160 /* Given the mask, find the first available signal that should be serviced. */
161 
162 int next_signal(struct sigpending *pending, sigset_t *mask)
163 {
164 	unsigned long i, *s, *m, x;
165 	int sig = 0;
166 
167 	s = pending->signal.sig;
168 	m = mask->sig;
169 	switch (_NSIG_WORDS) {
170 	default:
171 		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
172 			if ((x = *s &~ *m) != 0) {
173 				sig = ffz(~x) + i*_NSIG_BPW + 1;
174 				break;
175 			}
176 		break;
177 
178 	case 2: if ((x = s[0] &~ m[0]) != 0)
179 			sig = 1;
180 		else if ((x = s[1] &~ m[1]) != 0)
181 			sig = _NSIG_BPW + 1;
182 		else
183 			break;
184 		sig += ffz(~x);
185 		break;
186 
187 	case 1: if ((x = *s &~ *m) != 0)
188 			sig = ffz(~x) + 1;
189 		break;
190 	}
191 
192 	return sig;
193 }
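/*
 * Worked example (illustrative only, not part of the original source):
 * with SIGINT (2) and SIGTERM (15) both pending and SIGINT blocked in
 * *mask, next_signal() above returns 15; if nothing is deliverable it
 * returns 0.
 */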
194 
195 static inline void print_dropped_signal(int sig)
196 {
197 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
198 
199 	if (!print_fatal_signals)
200 		return;
201 
202 	if (!__ratelimit(&ratelimit_state))
203 		return;
204 
205 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
206 				current->comm, current->pid, sig);
207 }
208 
209 /*
210  * allocate a new signal queue record
211  * - this may be called without locks if and only if t == current, otherwise an
212  *   appropriate lock must be held to stop the target task from exiting
213  */
214 static struct sigqueue *
215 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
216 {
217 	struct sigqueue *q = NULL;
218 	struct user_struct *user;
219 
220 	/*
221 	 * Protect access to @t credentials. This can go away when all
222 	 * callers hold rcu read lock.
223 	 */
224 	rcu_read_lock();
225 	user = get_uid(__task_cred(t)->user);
226 	atomic_inc(&user->sigpending);
227 	rcu_read_unlock();
228 
229 	if (override_rlimit ||
230 	    atomic_read(&user->sigpending) <=
231 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
232 		q = kmem_cache_alloc(sigqueue_cachep, flags);
233 	} else {
234 		print_dropped_signal(sig);
235 	}
236 
237 	if (unlikely(q == NULL)) {
238 		atomic_dec(&user->sigpending);
239 		free_uid(user);
240 	} else {
241 		INIT_LIST_HEAD(&q->list);
242 		q->flags = 0;
243 		q->user = user;
244 	}
245 
246 	return q;
247 }
248 
249 static void __sigqueue_free(struct sigqueue *q)
250 {
251 	if (q->flags & SIGQUEUE_PREALLOC)
252 		return;
253 	atomic_dec(&q->user->sigpending);
254 	free_uid(q->user);
255 	kmem_cache_free(sigqueue_cachep, q);
256 }
257 
258 void flush_sigqueue(struct sigpending *queue)
259 {
260 	struct sigqueue *q;
261 
262 	sigemptyset(&queue->signal);
263 	while (!list_empty(&queue->list)) {
264 		q = list_entry(queue->list.next, struct sigqueue, list);
265 		list_del_init(&q->list);
266 		__sigqueue_free(q);
267 	}
268 }
269 
270 /*
271  * Flush all pending signals for a task.
272  */
273 void __flush_signals(struct task_struct *t)
274 {
275 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
276 	flush_sigqueue(&t->pending);
277 	flush_sigqueue(&t->signal->shared_pending);
278 }
279 
280 void flush_signals(struct task_struct *t)
281 {
282 	unsigned long flags;
283 
284 	spin_lock_irqsave(&t->sighand->siglock, flags);
285 	__flush_signals(t);
286 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
287 }
288 
289 static void __flush_itimer_signals(struct sigpending *pending)
290 {
291 	sigset_t signal, retain;
292 	struct sigqueue *q, *n;
293 
294 	signal = pending->signal;
295 	sigemptyset(&retain);
296 
297 	list_for_each_entry_safe(q, n, &pending->list, list) {
298 		int sig = q->info.si_signo;
299 
300 		if (likely(q->info.si_code != SI_TIMER)) {
301 			sigaddset(&retain, sig);
302 		} else {
303 			sigdelset(&signal, sig);
304 			list_del_init(&q->list);
305 			__sigqueue_free(q);
306 		}
307 	}
308 
309 	sigorsets(&pending->signal, &signal, &retain);
310 }
311 
312 void flush_itimer_signals(void)
313 {
314 	struct task_struct *tsk = current;
315 	unsigned long flags;
316 
317 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
318 	__flush_itimer_signals(&tsk->pending);
319 	__flush_itimer_signals(&tsk->signal->shared_pending);
320 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
321 }
322 
323 void ignore_signals(struct task_struct *t)
324 {
325 	int i;
326 
327 	for (i = 0; i < _NSIG; ++i)
328 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
329 
330 	flush_signals(t);
331 }
332 
333 /*
334  * Flush all handlers for a task.
335  */
336 
337 void
338 flush_signal_handlers(struct task_struct *t, int force_default)
339 {
340 	int i;
341 	struct k_sigaction *ka = &t->sighand->action[0];
342 	for (i = _NSIG ; i != 0 ; i--) {
343 		if (force_default || ka->sa.sa_handler != SIG_IGN)
344 			ka->sa.sa_handler = SIG_DFL;
345 		ka->sa.sa_flags = 0;
346 		sigemptyset(&ka->sa.sa_mask);
347 		ka++;
348 	}
349 }
350 
351 int unhandled_signal(struct task_struct *tsk, int sig)
352 {
353 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
354 	if (is_global_init(tsk))
355 		return 1;
356 	if (handler != SIG_IGN && handler != SIG_DFL)
357 		return 0;
358 	return !tracehook_consider_fatal_signal(tsk, sig);
359 }
360 
361 
362 /* Notify the system that a driver wants to block all signals for this
363  * process, and wants to be notified if any signals at all were to be
364  * sent/acted upon.  If the notifier routine returns non-zero, then the
365  * signal will be acted upon after all.  If the notifier routine returns 0,
366  *  then the signal will be blocked.  Only one block per process is
367  * allowed.  priv is a pointer to private data that the notifier routine
368  * can use to determine if the signal should be blocked or not.  */
369 
370 void
371 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
372 {
373 	unsigned long flags;
374 
375 	spin_lock_irqsave(&current->sighand->siglock, flags);
376 	current->notifier_mask = mask;
377 	current->notifier_data = priv;
378 	current->notifier = notifier;
379 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
380 }
381 
382 /* Notify the system that blocking has ended. */
383 
384 void
385 unblock_all_signals(void)
386 {
387 	unsigned long flags;
388 
389 	spin_lock_irqsave(&current->sighand->siglock, flags);
390 	current->notifier = NULL;
391 	current->notifier_data = NULL;
392 	recalc_sigpending();
393 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
394 }
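/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a driver that wants to hold off signal delivery while it owns a
 * hardware lock could register a notifier.  The notifier returns 0 to
 * keep the signal blocked, nonzero to let it be acted upon.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;	(hypothetical driver data)
 *		return !dev->hw_lock_held;	(block while the lock is held)
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */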
395 
396 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
397 {
398 	struct sigqueue *q, *first = NULL;
399 
400 	/*
401 	 * Collect the siginfo appropriate to this signal.  Check if
402 	 * there is another siginfo for the same signal.
403 	*/
404 	list_for_each_entry(q, &list->list, list) {
405 		if (q->info.si_signo == sig) {
406 			if (first)
407 				goto still_pending;
408 			first = q;
409 		}
410 	}
411 
412 	sigdelset(&list->signal, sig);
413 
414 	if (first) {
415 still_pending:
416 		list_del_init(&first->list);
417 		copy_siginfo(info, &first->info);
418 		__sigqueue_free(first);
419 	} else {
420 		/* Ok, it wasn't in the queue.  This must be
421 		   a fast-pathed signal or we must have been
422 		   out of queue space.  So zero out the info.
423 		 */
424 		info->si_signo = sig;
425 		info->si_errno = 0;
426 		info->si_code = SI_USER;
427 		info->si_pid = 0;
428 		info->si_uid = 0;
429 	}
430 }
431 
432 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
433 			siginfo_t *info)
434 {
435 	int sig = next_signal(pending, mask);
436 
437 	if (sig) {
438 		if (current->notifier) {
439 			if (sigismember(current->notifier_mask, sig)) {
440 				if (!(current->notifier)(current->notifier_data)) {
441 					clear_thread_flag(TIF_SIGPENDING);
442 					return 0;
443 				}
444 			}
445 		}
446 
447 		collect_signal(sig, pending, info);
448 	}
449 
450 	return sig;
451 }
452 
453 /*
454  * Dequeue a signal and return the element to the caller, which is
455  * expected to free it.
456  *
457  * All callers have to hold the siglock.
458  */
459 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
460 {
461 	int signr;
462 
463 	/* We only dequeue private signals from ourselves, we don't let
464 	 * signalfd steal them
465 	 */
466 	signr = __dequeue_signal(&tsk->pending, mask, info);
467 	if (!signr) {
468 		signr = __dequeue_signal(&tsk->signal->shared_pending,
469 					 mask, info);
470 		/*
471 		 * itimer signal ?
472 		 *
473 		 * itimers are process shared and we restart periodic
474 		 * itimers in the signal delivery path to prevent DoS
475 		 * attacks in the high resolution timer case. This is
476 		 * compliant with the old way of self restarting
477 		 * itimers, as the SIGALRM is a legacy signal and only
478 		 * queued once. Changing the restart behaviour to
479 		 * restart the timer in the signal dequeue path is
480 		 * reducing the timer noise on heavy loaded !highres
481 		 * systems too.
482 		 */
483 		if (unlikely(signr == SIGALRM)) {
484 			struct hrtimer *tmr = &tsk->signal->real_timer;
485 
486 			if (!hrtimer_is_queued(tmr) &&
487 			    tsk->signal->it_real_incr.tv64 != 0) {
488 				hrtimer_forward(tmr, tmr->base->get_time(),
489 						tsk->signal->it_real_incr);
490 				hrtimer_restart(tmr);
491 			}
492 		}
493 	}
494 
495 	recalc_sigpending();
496 	if (!signr)
497 		return 0;
498 
499 	if (unlikely(sig_kernel_stop(signr))) {
500 		/*
501 		 * Set a marker that we have dequeued a stop signal.  Our
502 		 * caller might release the siglock and then the pending
503 		 * stop signal it is about to process is no longer in the
504 		 * pending bitmasks, but must still be cleared by a SIGCONT
505 		 * (and overruled by a SIGKILL).  So those cases clear this
506 		 * shared flag after we've set it.  Note that this flag may
507 		 * remain set after the signal we return is ignored or
508 		 * handled.  That doesn't matter because its only purpose
509 		 * is to alert stop-signal processing code when another
510 		 * processor has come along and cleared the flag.
511 		 */
512 		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
513 	}
514 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
515 		/*
516 		 * Release the siglock to ensure proper locking order
517 		 * of timer locks outside of siglocks.  Note, we leave
518 		 * irqs disabled here, since the posix-timers code is
519 		 * about to disable them again anyway.
520 		 */
521 		spin_unlock(&tsk->sighand->siglock);
522 		do_schedule_next_timer(info);
523 		spin_lock(&tsk->sighand->siglock);
524 	}
525 	return signr;
526 }
527 
528 /*
529  * Tell a process that it has a new active signal.
530  *
531  * NOTE! we rely on the previous spin_lock to
532  * lock interrupts for us! We can only be called with
533  * "siglock" held, and the local interrupt must
534  * have been disabled when that got acquired!
535  *
536  * No need to set need_resched since signal event passing
537  * goes through ->blocked
538  */
539 void signal_wake_up(struct task_struct *t, int resume)
540 {
541 	unsigned int mask;
542 
543 	set_tsk_thread_flag(t, TIF_SIGPENDING);
544 
545 	/*
546 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
547 	 * case. We don't check t->state here because there is a race with it
548 	 * executing on another processor and just now entering stopped state.
549 	 * By using wake_up_state, we ensure the process will wake up and
550 	 * handle its death signal.
551 	 */
552 	mask = TASK_INTERRUPTIBLE;
553 	if (resume)
554 		mask |= TASK_WAKEKILL;
555 	if (!wake_up_state(t, mask))
556 		kick_process(t);
557 }
558 
559 /*
560  * Remove signals in mask from the pending set and queue.
561  * Returns 1 if any signals were found.
562  *
563  * All callers must be holding the siglock.
564  *
565  * This version takes a sigset mask and looks at all signals,
566  * not just those in the first mask word.
567  */
568 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
569 {
570 	struct sigqueue *q, *n;
571 	sigset_t m;
572 
573 	sigandsets(&m, mask, &s->signal);
574 	if (sigisemptyset(&m))
575 		return 0;
576 
577 	signandsets(&s->signal, &s->signal, mask);
578 	list_for_each_entry_safe(q, n, &s->list, list) {
579 		if (sigismember(mask, q->info.si_signo)) {
580 			list_del_init(&q->list);
581 			__sigqueue_free(q);
582 		}
583 	}
584 	return 1;
585 }
586 /*
587  * Remove signals in mask from the pending set and queue.
588  * Returns 1 if any signals were found.
589  *
590  * All callers must be holding the siglock.
591  */
592 static int rm_from_queue(unsigned long mask, struct sigpending *s)
593 {
594 	struct sigqueue *q, *n;
595 
596 	if (!sigtestsetmask(&s->signal, mask))
597 		return 0;
598 
599 	sigdelsetmask(&s->signal, mask);
600 	list_for_each_entry_safe(q, n, &s->list, list) {
601 		if (q->info.si_signo < SIGRTMIN &&
602 		    (mask & sigmask(q->info.si_signo))) {
603 			list_del_init(&q->list);
604 			__sigqueue_free(q);
605 		}
606 	}
607 	return 1;
608 }
609 
610 static inline int is_si_special(const struct siginfo *info)
611 {
612 	return info <= SEND_SIG_FORCED;
613 }
614 
615 static inline bool si_fromuser(const struct siginfo *info)
616 {
617 	return info == SEND_SIG_NOINFO ||
618 		(!is_si_special(info) && SI_FROMUSER(info));
619 }
620 
621 /*
622  * Bad permissions for sending the signal
623  * - the caller must hold at least the RCU read lock
624  */
625 static int check_kill_permission(int sig, struct siginfo *info,
626 				 struct task_struct *t)
627 {
628 	const struct cred *cred = current_cred(), *tcred;
629 	struct pid *sid;
630 	int error;
631 
632 	if (!valid_signal(sig))
633 		return -EINVAL;
634 
635 	if (!si_fromuser(info))
636 		return 0;
637 
638 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
639 	if (error)
640 		return error;
641 
642 	tcred = __task_cred(t);
643 	if ((cred->euid ^ tcred->suid) &&
644 	    (cred->euid ^ tcred->uid) &&
645 	    (cred->uid  ^ tcred->suid) &&
646 	    (cred->uid  ^ tcred->uid) &&
647 	    !capable(CAP_KILL)) {
648 		switch (sig) {
649 		case SIGCONT:
650 			sid = task_session(t);
651 			/*
652 			 * We don't return the error if sid == NULL. The
653 			 * task was unhashed, the caller must notice this.
654 			 */
655 			if (!sid || sid == task_session(current))
656 				break;
657 		default:
658 			return -EPERM;
659 		}
660 	}
661 
662 	return security_task_kill(t, info, sig, 0);
663 }
664 
665 /*
666  * Handle magic process-wide effects of stop/continue signals. Unlike
667  * the signal actions, these happen immediately at signal-generation
668  * time regardless of blocking, ignoring, or handling.  This does the
669  * actual continuing for SIGCONT, but not the actual stopping for stop
670  * signals. The process stop is done as a signal action for SIG_DFL.
671  *
672  * Returns true if the signal should be actually delivered, otherwise
673  * it should be dropped.
674  */
675 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
676 {
677 	struct signal_struct *signal = p->signal;
678 	struct task_struct *t;
679 
680 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
681 		/*
682 		 * The process is in the middle of dying, nothing to do.
683 		 */
684 	} else if (sig_kernel_stop(sig)) {
685 		/*
686 		 * This is a stop signal.  Remove SIGCONT from all queues.
687 		 */
688 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
689 		t = p;
690 		do {
691 			rm_from_queue(sigmask(SIGCONT), &t->pending);
692 		} while_each_thread(p, t);
693 	} else if (sig == SIGCONT) {
694 		unsigned int why;
695 		/*
696 		 * Remove all stop signals from all queues,
697 		 * and wake all threads.
698 		 */
699 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
700 		t = p;
701 		do {
702 			unsigned int state;
703 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
704 			/*
705 			 * If there is a handler for SIGCONT, we must make
706 			 * sure that no thread returns to user mode before
707 			 * we post the signal, in case it was the only
708 			 * thread eligible to run the signal handler--then
709 			 * it must not do anything between resuming and
710 			 * running the handler.  With the TIF_SIGPENDING
711 			 * flag set, the thread will pause and acquire the
712 			 * siglock that we hold now and until we've queued
713 			 * the pending signal.
714 			 *
715 			 * Wake up the stopped thread _after_ setting
716 			 * TIF_SIGPENDING
717 			 */
718 			state = __TASK_STOPPED;
719 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
720 				set_tsk_thread_flag(t, TIF_SIGPENDING);
721 				state |= TASK_INTERRUPTIBLE;
722 			}
723 			wake_up_state(t, state);
724 		} while_each_thread(p, t);
725 
726 		/*
727 		 * Notify the parent with CLD_CONTINUED if we were stopped.
728 		 *
729 		 * If we were in the middle of a group stop, we pretend it
730 		 * was already finished, and then continued. Since SIGCHLD
731 		 * doesn't queue we report only CLD_STOPPED, as if the next
732 		 * CLD_CONTINUED was dropped.
733 		 */
734 		why = 0;
735 		if (signal->flags & SIGNAL_STOP_STOPPED)
736 			why |= SIGNAL_CLD_CONTINUED;
737 		else if (signal->group_stop_count)
738 			why |= SIGNAL_CLD_STOPPED;
739 
740 		if (why) {
741 			/*
742 			 * The first thread which returns from do_signal_stop()
743 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
744 			 * notify its parent. See get_signal_to_deliver().
745 			 */
746 			signal->flags = why | SIGNAL_STOP_CONTINUED;
747 			signal->group_stop_count = 0;
748 			signal->group_exit_code = 0;
749 		} else {
750 			/*
751 			 * We are not stopped, but there could be a stop
752 			 * signal in the middle of being processed after
753 			 * being removed from the queue.  Clear that too.
754 			 */
755 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
756 		}
757 	}
758 
759 	return !sig_ignored(p, sig, from_ancestor_ns);
760 }
761 
762 /*
763  * Test if P wants to take SIG.  After we've checked all threads with this,
764  * it's equivalent to finding no threads not blocking SIG.  Any threads not
765  * blocking SIG were ruled out because they are not running and already
766  * have pending signals.  Such threads will dequeue from the shared queue
767  * as soon as they're available, so putting the signal on the shared queue
768  * will be equivalent to sending it to one such thread.
769  */
770 static inline int wants_signal(int sig, struct task_struct *p)
771 {
772 	if (sigismember(&p->blocked, sig))
773 		return 0;
774 	if (p->flags & PF_EXITING)
775 		return 0;
776 	if (sig == SIGKILL)
777 		return 1;
778 	if (task_is_stopped_or_traced(p))
779 		return 0;
780 	return task_curr(p) || !signal_pending(p);
781 }
782 
783 static void complete_signal(int sig, struct task_struct *p, int group)
784 {
785 	struct signal_struct *signal = p->signal;
786 	struct task_struct *t;
787 
788 	/*
789 	 * Now find a thread we can wake up to take the signal off the queue.
790 	 *
791 	 * If the main thread wants the signal, it gets first crack.
792 	 * Probably the least surprising to the average bear.
793 	 */
794 	if (wants_signal(sig, p))
795 		t = p;
796 	else if (!group || thread_group_empty(p))
797 		/*
798 		 * There is just one thread and it does not need to be woken.
799 		 * It will dequeue unblocked signals before it runs again.
800 		 */
801 		return;
802 	else {
803 		/*
804 		 * Otherwise try to find a suitable thread.
805 		 */
806 		t = signal->curr_target;
807 		while (!wants_signal(sig, t)) {
808 			t = next_thread(t);
809 			if (t == signal->curr_target)
810 				/*
811 				 * No thread needs to be woken.
812 				 * Any eligible threads will see
813 				 * the signal in the queue soon.
814 				 */
815 				return;
816 		}
817 		signal->curr_target = t;
818 	}
819 
820 	/*
821 	 * Found a killable thread.  If the signal will be fatal,
822 	 * then start taking the whole group down immediately.
823 	 */
824 	if (sig_fatal(p, sig) &&
825 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
826 	    !sigismember(&t->real_blocked, sig) &&
827 	    (sig == SIGKILL ||
828 	     !tracehook_consider_fatal_signal(t, sig))) {
829 		/*
830 		 * This signal will be fatal to the whole group.
831 		 */
832 		if (!sig_kernel_coredump(sig)) {
833 			/*
834 			 * Start a group exit and wake everybody up.
835 			 * This way we don't have other threads
836 			 * running and doing things after a slower
837 			 * thread has the fatal signal pending.
838 			 */
839 			signal->flags = SIGNAL_GROUP_EXIT;
840 			signal->group_exit_code = sig;
841 			signal->group_stop_count = 0;
842 			t = p;
843 			do {
844 				sigaddset(&t->pending.signal, SIGKILL);
845 				signal_wake_up(t, 1);
846 			} while_each_thread(p, t);
847 			return;
848 		}
849 	}
850 
851 	/*
852 	 * The signal is already in the shared-pending queue.
853 	 * Tell the chosen thread to wake up and dequeue it.
854 	 */
855 	signal_wake_up(t, sig == SIGKILL);
856 	return;
857 }
858 
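/*
 * Legacy (non-realtime) signals are never queued more than once: if @sig
 * is below SIGRTMIN and is already pending in @signals, the caller drops
 * the new instance rather than queueing a second sigqueue entry.
 */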
859 static inline int legacy_queue(struct sigpending *signals, int sig)
860 {
861 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
862 }
863 
864 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
865 			int group, int from_ancestor_ns)
866 {
867 	struct sigpending *pending;
868 	struct sigqueue *q;
869 	int override_rlimit;
870 
871 	trace_signal_generate(sig, info, t);
872 
873 	assert_spin_locked(&t->sighand->siglock);
874 
875 	if (!prepare_signal(sig, t, from_ancestor_ns))
876 		return 0;
877 
878 	pending = group ? &t->signal->shared_pending : &t->pending;
879 	/*
880 	 * Short-circuit ignored signals and support queuing
881 	 * exactly one non-rt signal, so that we can get more
882 	 * detailed information about the cause of the signal.
883 	 */
884 	if (legacy_queue(pending, sig))
885 		return 0;
886 	/*
887 	 * fast-pathed signals for kernel-internal things like SIGSTOP
888 	 * or SIGKILL.
889 	 */
890 	if (info == SEND_SIG_FORCED)
891 		goto out_set;
892 
893 	/* Real-time signals must be queued if sent by sigqueue, or
894 	   some other real-time mechanism.  It is implementation
895 	   defined whether kill() does so.  We attempt to do so, on
896 	   the principle of least surprise, but since kill is not
897 	   allowed to fail with EAGAIN when low on memory we just
898 	   make sure at least one signal gets delivered and don't
899 	   pass on the info struct.  */
900 
901 	if (sig < SIGRTMIN)
902 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
903 	else
904 		override_rlimit = 0;
905 
906 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
907 		override_rlimit);
908 	if (q) {
909 		list_add_tail(&q->list, &pending->list);
910 		switch ((unsigned long) info) {
911 		case (unsigned long) SEND_SIG_NOINFO:
912 			q->info.si_signo = sig;
913 			q->info.si_errno = 0;
914 			q->info.si_code = SI_USER;
915 			q->info.si_pid = task_tgid_nr_ns(current,
916 							task_active_pid_ns(t));
917 			q->info.si_uid = current_uid();
918 			break;
919 		case (unsigned long) SEND_SIG_PRIV:
920 			q->info.si_signo = sig;
921 			q->info.si_errno = 0;
922 			q->info.si_code = SI_KERNEL;
923 			q->info.si_pid = 0;
924 			q->info.si_uid = 0;
925 			break;
926 		default:
927 			copy_siginfo(&q->info, info);
928 			if (from_ancestor_ns)
929 				q->info.si_pid = 0;
930 			break;
931 		}
932 	} else if (!is_si_special(info)) {
933 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
934 			/*
935 			 * Queue overflow, abort.  We may abort if the
936 			 * signal was rt and sent by user using something
937 			 * other than kill().
938 			 */
939 			trace_signal_overflow_fail(sig, group, info);
940 			return -EAGAIN;
941 		} else {
942 			/*
943 			 * This is a silent loss of information.  We still
944 			 * send the signal, but the *info bits are lost.
945 			 */
946 			trace_signal_lose_info(sig, group, info);
947 		}
948 	}
949 
950 out_set:
951 	signalfd_notify(t, sig);
952 	sigaddset(&pending->signal, sig);
953 	complete_signal(sig, t, group);
954 	return 0;
955 }
956 
957 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
958 			int group)
959 {
960 	int from_ancestor_ns = 0;
961 
962 #ifdef CONFIG_PID_NS
963 	from_ancestor_ns = si_fromuser(info) &&
964 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
965 #endif
966 
967 	return __send_signal(sig, info, t, group, from_ancestor_ns);
968 }
969 
970 static void print_fatal_signal(struct pt_regs *regs, int signr)
971 {
972 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
973 		current->comm, task_pid_nr(current), signr);
974 
975 #if defined(__i386__) && !defined(__arch_um__)
976 	printk("code at %08lx: ", regs->ip);
977 	{
978 		int i;
979 		for (i = 0; i < 16; i++) {
980 			unsigned char insn;
981 
982 			__get_user(insn, (unsigned char *)(regs->ip + i));
983 			printk("%02x ", insn);
984 		}
985 	}
986 #endif
987 	printk("\n");
988 	preempt_disable();
989 	show_regs(regs);
990 	preempt_enable();
991 }
992 
993 static int __init setup_print_fatal_signals(char *str)
994 {
995 	get_option (&str, &print_fatal_signals);
996 
997 	return 1;
998 }
999 
1000 __setup("print-fatal-signals=", setup_print_fatal_signals);
1001 
1002 int
1003 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1004 {
1005 	return send_signal(sig, info, p, 1);
1006 }
1007 
1008 static int
1009 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1010 {
1011 	return send_signal(sig, info, t, 0);
1012 }
1013 
1014 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1015 			bool group)
1016 {
1017 	unsigned long flags;
1018 	int ret = -ESRCH;
1019 
1020 	if (lock_task_sighand(p, &flags)) {
1021 		ret = send_signal(sig, info, p, group);
1022 		unlock_task_sighand(p, &flags);
1023 	}
1024 
1025 	return ret;
1026 }
1027 
1028 /*
1029  * Force a signal that the process can't ignore: if necessary
1030  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1031  *
1032  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1033  * since we do not want to have a signal handler that was blocked
1034  * be invoked when user space had explicitly blocked it.
1035  *
1036  * We don't want to have recursive SIGSEGV's etc, for example,
1037  * that is why we also clear SIGNAL_UNKILLABLE.
1038  */
1039 int
1040 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1041 {
1042 	unsigned long int flags;
1043 	int ret, blocked, ignored;
1044 	struct k_sigaction *action;
1045 
1046 	spin_lock_irqsave(&t->sighand->siglock, flags);
1047 	action = &t->sighand->action[sig-1];
1048 	ignored = action->sa.sa_handler == SIG_IGN;
1049 	blocked = sigismember(&t->blocked, sig);
1050 	if (blocked || ignored) {
1051 		action->sa.sa_handler = SIG_DFL;
1052 		if (blocked) {
1053 			sigdelset(&t->blocked, sig);
1054 			recalc_sigpending_and_wake(t);
1055 		}
1056 	}
1057 	if (action->sa.sa_handler == SIG_DFL)
1058 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1059 	ret = specific_send_sig_info(sig, info, t);
1060 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1061 
1062 	return ret;
1063 }
1064 
1065 /*
1066  * Nuke all other threads in the group.
1067  */
1068 void zap_other_threads(struct task_struct *p)
1069 {
1070 	struct task_struct *t;
1071 
1072 	p->signal->group_stop_count = 0;
1073 
1074 	for (t = next_thread(p); t != p; t = next_thread(t)) {
1075 		/*
1076 		 * Don't bother with already dead threads
1077 		 */
1078 		if (t->exit_state)
1079 			continue;
1080 
1081 		/* SIGKILL will be handled before any pending SIGSTOP */
1082 		sigaddset(&t->pending.signal, SIGKILL);
1083 		signal_wake_up(t, 1);
1084 	}
1085 }
1086 
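/*
 * Safely take @tsk->sighand->siglock when @tsk may not be current:
 * ->sighand can be changed under us by de_thread() or set to NULL by
 * release_task(), so retry under rcu_read_lock() until the lock has been
 * taken on the sighand_struct that is still installed.  Returns NULL if
 * the task's signal state has already been torn down.
 */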
1087 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1088 {
1089 	struct sighand_struct *sighand;
1090 
1091 	rcu_read_lock();
1092 	for (;;) {
1093 		sighand = rcu_dereference(tsk->sighand);
1094 		if (unlikely(sighand == NULL))
1095 			break;
1096 
1097 		spin_lock_irqsave(&sighand->siglock, *flags);
1098 		if (likely(sighand == tsk->sighand))
1099 			break;
1100 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1101 	}
1102 	rcu_read_unlock();
1103 
1104 	return sighand;
1105 }
1106 
1107 /*
1108  * send signal info to all the members of a group
1109  * - the caller must hold the RCU read lock at least
1110  */
1111 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1112 {
1113 	int ret = check_kill_permission(sig, info, p);
1114 
1115 	if (!ret && sig)
1116 		ret = do_send_sig_info(sig, info, p, true);
1117 
1118 	return ret;
1119 }
1120 
1121 /*
1122  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1123  * control characters do (^C, ^Z etc)
1124  * - the caller must hold at least a readlock on tasklist_lock
1125  */
1126 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1127 {
1128 	struct task_struct *p = NULL;
1129 	int retval, success;
1130 
1131 	success = 0;
1132 	retval = -ESRCH;
1133 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1134 		int err = group_send_sig_info(sig, info, p);
1135 		success |= !err;
1136 		retval = err;
1137 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1138 	return success ? 0 : retval;
1139 }
1140 
1141 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1142 {
1143 	int error = -ESRCH;
1144 	struct task_struct *p;
1145 
1146 	rcu_read_lock();
1147 retry:
1148 	p = pid_task(pid, PIDTYPE_PID);
1149 	if (p) {
1150 		error = group_send_sig_info(sig, info, p);
1151 		if (unlikely(error == -ESRCH))
1152 			/*
1153 			 * The task was unhashed in between, try again.
1154 			 * If it is dead, pid_task() will return NULL;
1155 			 * if we race with de_thread() it will find the
1156 			 * new leader.
1157 			 */
1158 			goto retry;
1159 	}
1160 	rcu_read_unlock();
1161 
1162 	return error;
1163 }
1164 
1165 int
1166 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1167 {
1168 	int error;
1169 	rcu_read_lock();
1170 	error = kill_pid_info(sig, info, find_vpid(pid));
1171 	rcu_read_unlock();
1172 	return error;
1173 }
1174 
1175 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1176 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1177 		      uid_t uid, uid_t euid, u32 secid)
1178 {
1179 	int ret = -EINVAL;
1180 	struct task_struct *p;
1181 	const struct cred *pcred;
1182 	unsigned long flags;
1183 
1184 	if (!valid_signal(sig))
1185 		return ret;
1186 
1187 	rcu_read_lock();
1188 	p = pid_task(pid, PIDTYPE_PID);
1189 	if (!p) {
1190 		ret = -ESRCH;
1191 		goto out_unlock;
1192 	}
1193 	pcred = __task_cred(p);
1194 	if (si_fromuser(info) &&
1195 	    euid != pcred->suid && euid != pcred->uid &&
1196 	    uid  != pcred->suid && uid  != pcred->uid) {
1197 		ret = -EPERM;
1198 		goto out_unlock;
1199 	}
1200 	ret = security_task_kill(p, info, sig, secid);
1201 	if (ret)
1202 		goto out_unlock;
1203 
1204 	if (sig) {
1205 		if (lock_task_sighand(p, &flags)) {
1206 			ret = __send_signal(sig, info, p, 1, 0);
1207 			unlock_task_sighand(p, &flags);
1208 		} else
1209 			ret = -ESRCH;
1210 	}
1211 out_unlock:
1212 	rcu_read_unlock();
1213 	return ret;
1214 }
1215 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1216 
1217 /*
1218  * kill_something_info() interprets pid in interesting ways just like kill(2).
1219  *
1220  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1221  * is probably wrong.  Should make it like BSD or SYSV.
1222  */
1223 
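/*
 * As with kill(2): pid > 0 signals the single process pid, pid == 0
 * signals every process in the caller's process group, pid == -1 signals
 * every process the caller may signal except init and the caller's own
 * thread group, and pid < -1 signals the process group -pid.
 */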
1224 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1225 {
1226 	int ret;
1227 
1228 	if (pid > 0) {
1229 		rcu_read_lock();
1230 		ret = kill_pid_info(sig, info, find_vpid(pid));
1231 		rcu_read_unlock();
1232 		return ret;
1233 	}
1234 
1235 	read_lock(&tasklist_lock);
1236 	if (pid != -1) {
1237 		ret = __kill_pgrp_info(sig, info,
1238 				pid ? find_vpid(-pid) : task_pgrp(current));
1239 	} else {
1240 		int retval = 0, count = 0;
1241 		struct task_struct * p;
1242 
1243 		for_each_process(p) {
1244 			if (task_pid_vnr(p) > 1 &&
1245 					!same_thread_group(p, current)) {
1246 				int err = group_send_sig_info(sig, info, p);
1247 				++count;
1248 				if (err != -EPERM)
1249 					retval = err;
1250 			}
1251 		}
1252 		ret = count ? retval : -ESRCH;
1253 	}
1254 	read_unlock(&tasklist_lock);
1255 
1256 	return ret;
1257 }
1258 
1259 /*
1260  * These are for backward compatibility with the rest of the kernel source.
1261  */
1262 
1263 int
1264 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1265 {
1266 	/*
1267 	 * Make sure legacy kernel users don't send in bad values
1268 	 * (normal paths check this in check_kill_permission).
1269 	 */
1270 	if (!valid_signal(sig))
1271 		return -EINVAL;
1272 
1273 	return do_send_sig_info(sig, info, p, false);
1274 }
1275 
1276 #define __si_special(priv) \
1277 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1278 
1279 int
1280 send_sig(int sig, struct task_struct *p, int priv)
1281 {
1282 	return send_sig_info(sig, __si_special(priv), p);
1283 }
1284 
1285 void
1286 force_sig(int sig, struct task_struct *p)
1287 {
1288 	force_sig_info(sig, SEND_SIG_PRIV, p);
1289 }
1290 
1291 /*
1292  * When things go south during signal handling, we
1293  * will force a SIGSEGV. And if the signal that caused
1294  * the problem was already a SIGSEGV, we'll want to
1295  * make sure we don't even try to deliver the signal.
1296  */
1297 int
1298 force_sigsegv(int sig, struct task_struct *p)
1299 {
1300 	if (sig == SIGSEGV) {
1301 		unsigned long flags;
1302 		spin_lock_irqsave(&p->sighand->siglock, flags);
1303 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1304 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1305 	}
1306 	force_sig(SIGSEGV, p);
1307 	return 0;
1308 }
1309 
1310 int kill_pgrp(struct pid *pid, int sig, int priv)
1311 {
1312 	int ret;
1313 
1314 	read_lock(&tasklist_lock);
1315 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1316 	read_unlock(&tasklist_lock);
1317 
1318 	return ret;
1319 }
1320 EXPORT_SYMBOL(kill_pgrp);
1321 
1322 int kill_pid(struct pid *pid, int sig, int priv)
1323 {
1324 	return kill_pid_info(sig, __si_special(priv), pid);
1325 }
1326 EXPORT_SYMBOL(kill_pid);
1327 
1328 /*
1329  * These functions support sending signals using preallocated sigqueue
1330  * structures.  This is needed "because realtime applications cannot
1331  * afford to lose notifications of asynchronous events, like timer
1332  * expirations or I/O completions".  In the case of Posix Timers
1333  * we allocate the sigqueue structure from the timer_create.  If this
1334  * allocation fails we are able to report the failure to the application
1335  * with an EAGAIN error.
1336  */
1337 struct sigqueue *sigqueue_alloc(void)
1338 {
1339 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1340 
1341 	if (q)
1342 		q->flags |= SIGQUEUE_PREALLOC;
1343 
1344 	return q;
1345 }
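/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): the posix-timers code preallocates the sigqueue at timer
 * creation so that a later expiry can never fail with EAGAIN.
 *
 *	q = sigqueue_alloc();			at timer_create()
 *	if (!q)
 *		return -EAGAIN;			reported to the application
 *	...
 *	send_sigqueue(q, task, group);		at each timer expiry
 *	...
 *	sigqueue_free(q);			when the timer is deleted
 */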
1346 
1347 void sigqueue_free(struct sigqueue *q)
1348 {
1349 	unsigned long flags;
1350 	spinlock_t *lock = &current->sighand->siglock;
1351 
1352 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1353 	/*
1354 	 * We must hold ->siglock while testing q->list
1355 	 * to serialize with collect_signal() or with
1356 	 * __exit_signal()->flush_sigqueue().
1357 	 */
1358 	spin_lock_irqsave(lock, flags);
1359 	q->flags &= ~SIGQUEUE_PREALLOC;
1360 	/*
1361 	 * If it is queued it will be freed when dequeued,
1362 	 * like the "regular" sigqueue.
1363 	 */
1364 	if (!list_empty(&q->list))
1365 		q = NULL;
1366 	spin_unlock_irqrestore(lock, flags);
1367 
1368 	if (q)
1369 		__sigqueue_free(q);
1370 }
1371 
1372 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1373 {
1374 	int sig = q->info.si_signo;
1375 	struct sigpending *pending;
1376 	unsigned long flags;
1377 	int ret;
1378 
1379 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1380 
1381 	ret = -1;
1382 	if (!likely(lock_task_sighand(t, &flags)))
1383 		goto ret;
1384 
1385 	ret = 1; /* the signal is ignored */
1386 	if (!prepare_signal(sig, t, 0))
1387 		goto out;
1388 
1389 	ret = 0;
1390 	if (unlikely(!list_empty(&q->list))) {
1391 		/*
1392 		 * If an SI_TIMER entry is already queued, just increment
1393 		 * the overrun count.
1394 		 */
1395 		BUG_ON(q->info.si_code != SI_TIMER);
1396 		q->info.si_overrun++;
1397 		goto out;
1398 	}
1399 	q->info.si_overrun = 0;
1400 
1401 	signalfd_notify(t, sig);
1402 	pending = group ? &t->signal->shared_pending : &t->pending;
1403 	list_add_tail(&q->list, &pending->list);
1404 	sigaddset(&pending->signal, sig);
1405 	complete_signal(sig, t, group);
1406 out:
1407 	unlock_task_sighand(t, &flags);
1408 ret:
1409 	return ret;
1410 }
1411 
1412 /*
1413  * Let a parent know about the death of a child.
1414  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1415  *
1416  * Returns -1 if our parent ignored us and so we've switched to
1417  * self-reaping, or else @sig.
1418  */
1419 int do_notify_parent(struct task_struct *tsk, int sig)
1420 {
1421 	struct siginfo info;
1422 	unsigned long flags;
1423 	struct sighand_struct *psig;
1424 	int ret = sig;
1425 
1426 	BUG_ON(sig == -1);
1427 
1428  	/* do_notify_parent_cldstop should have been called instead.  */
1429  	BUG_ON(task_is_stopped_or_traced(tsk));
1430 
1431 	BUG_ON(!task_ptrace(tsk) &&
1432 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1433 
1434 	info.si_signo = sig;
1435 	info.si_errno = 0;
1436 	/*
1437 	 * We are under tasklist_lock here so our parent is tied to
1438 	 * us and cannot exit and release its namespace.
1439 	 *
1440 	 * The only thing it can do is switch its nsproxy with sys_unshare(),
1441 	 * but unsharing pid namespaces is not allowed, so we will always
1442 	 * see the relevant namespace.
1443 	 *
1444 	 * write_lock() currently calls preempt_disable(), which is the
1445 	 * same as rcu_read_lock(), but according to Oleg it is not
1446 	 * correct to rely on this.
1447 	 */
1448 	rcu_read_lock();
1449 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1450 	info.si_uid = __task_cred(tsk)->uid;
1451 	rcu_read_unlock();
1452 
1453 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1454 				tsk->signal->utime));
1455 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1456 				tsk->signal->stime));
1457 
1458 	info.si_status = tsk->exit_code & 0x7f;
1459 	if (tsk->exit_code & 0x80)
1460 		info.si_code = CLD_DUMPED;
1461 	else if (tsk->exit_code & 0x7f)
1462 		info.si_code = CLD_KILLED;
1463 	else {
1464 		info.si_code = CLD_EXITED;
1465 		info.si_status = tsk->exit_code >> 8;
1466 	}
1467 
1468 	psig = tsk->parent->sighand;
1469 	spin_lock_irqsave(&psig->siglock, flags);
1470 	if (!task_ptrace(tsk) && sig == SIGCHLD &&
1471 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1472 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1473 		/*
1474 		 * We are exiting and our parent doesn't care.  POSIX.1
1475 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1476 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1477 		 * automatically and not left for our parent's wait4 call.
1478 		 * Rather than having the parent do it as a magic kind of
1479 		 * signal handler, we just set this to tell do_exit that we
1480 		 * can be cleaned up without becoming a zombie.  Note that
1481 		 * we still call __wake_up_parent in this case, because a
1482 		 * blocked sys_wait4 might now return -ECHILD.
1483 		 *
1484 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1485 		 * is implementation-defined: we do (if you don't want
1486 		 * it, just use SIG_IGN instead).
1487 		 */
1488 		ret = tsk->exit_signal = -1;
1489 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1490 			sig = -1;
1491 	}
1492 	if (valid_signal(sig) && sig > 0)
1493 		__group_send_sig_info(sig, &info, tsk->parent);
1494 	__wake_up_parent(tsk, tsk->parent);
1495 	spin_unlock_irqrestore(&psig->siglock, flags);
1496 
1497 	return ret;
1498 }
1499 
1500 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1501 {
1502 	struct siginfo info;
1503 	unsigned long flags;
1504 	struct task_struct *parent;
1505 	struct sighand_struct *sighand;
1506 
1507 	if (task_ptrace(tsk))
1508 		parent = tsk->parent;
1509 	else {
1510 		tsk = tsk->group_leader;
1511 		parent = tsk->real_parent;
1512 	}
1513 
1514 	info.si_signo = SIGCHLD;
1515 	info.si_errno = 0;
1516 	/*
1517 	 * see comment in do_notify_parent() abot the following 3 lines
1518 	 */
1519 	rcu_read_lock();
1520 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1521 	info.si_uid = __task_cred(tsk)->uid;
1522 	rcu_read_unlock();
1523 
1524 	info.si_utime = cputime_to_clock_t(tsk->utime);
1525 	info.si_stime = cputime_to_clock_t(tsk->stime);
1526 
1527  	info.si_code = why;
1528  	switch (why) {
1529  	case CLD_CONTINUED:
1530  		info.si_status = SIGCONT;
1531  		break;
1532  	case CLD_STOPPED:
1533  		info.si_status = tsk->signal->group_exit_code & 0x7f;
1534  		break;
1535  	case CLD_TRAPPED:
1536  		info.si_status = tsk->exit_code & 0x7f;
1537  		break;
1538  	default:
1539  		BUG();
1540  	}
1541 
1542 	sighand = parent->sighand;
1543 	spin_lock_irqsave(&sighand->siglock, flags);
1544 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1545 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1546 		__group_send_sig_info(SIGCHLD, &info, parent);
1547 	/*
1548 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1549 	 */
1550 	__wake_up_parent(tsk, parent);
1551 	spin_unlock_irqrestore(&sighand->siglock, flags);
1552 }
1553 
1554 static inline int may_ptrace_stop(void)
1555 {
1556 	if (!likely(task_ptrace(current)))
1557 		return 0;
1558 	/*
1559 	 * Are we in the middle of do_coredump?
1560 	 * If so and our tracer is also part of the coredump stopping
1561 	 * is a deadlock situation, and pointless because our tracer
1562 	 * is dead so don't allow us to stop.
1563 	 * If SIGKILL was already sent before the caller unlocked
1564 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1565 	 * is safe to enter schedule().
1566 	 */
1567 	if (unlikely(current->mm->core_state) &&
1568 	    unlikely(current->mm == current->parent->mm))
1569 		return 0;
1570 
1571 	return 1;
1572 }
1573 
1574 /*
1575  * Return nonzero if there is a SIGKILL that should be waking us up.
1576  * Called with the siglock held.
1577  */
1578 static int sigkill_pending(struct task_struct *tsk)
1579 {
1580 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1581 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1582 }
1583 
1584 /*
1585  * This must be called with current->sighand->siglock held.
1586  *
1587  * This should be the path for all ptrace stops.
1588  * We always set current->last_siginfo while stopped here.
1589  * That makes it a way to test a stopped process for
1590  * being ptrace-stopped vs being job-control-stopped.
1591  *
1592  * If we actually decide not to stop at all because the tracer
1593  * is gone, we keep current->exit_code unless clear_code.
1594  */
1595 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1596 {
1597 	if (arch_ptrace_stop_needed(exit_code, info)) {
1598 		/*
1599 		 * The arch code has something special to do before a
1600 		 * ptrace stop.  This is allowed to block, e.g. for faults
1601 		 * on user stack pages.  We can't keep the siglock while
1602 		 * calling arch_ptrace_stop, so we must release it now.
1603 		 * To preserve proper semantics, we must do this before
1604 		 * any signal bookkeeping like checking group_stop_count.
1605 		 * Meanwhile, a SIGKILL could come in before we retake the
1606 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1607 		 * So after regaining the lock, we must check for SIGKILL.
1608 		 */
1609 		spin_unlock_irq(&current->sighand->siglock);
1610 		arch_ptrace_stop(exit_code, info);
1611 		spin_lock_irq(&current->sighand->siglock);
1612 		if (sigkill_pending(current))
1613 			return;
1614 	}
1615 
1616 	/*
1617 	 * If there is a group stop in progress,
1618 	 * we must participate in the bookkeeping.
1619 	 */
1620 	if (current->signal->group_stop_count > 0)
1621 		--current->signal->group_stop_count;
1622 
1623 	current->last_siginfo = info;
1624 	current->exit_code = exit_code;
1625 
1626 	/* Let the debugger run.  */
1627 	__set_current_state(TASK_TRACED);
1628 	spin_unlock_irq(&current->sighand->siglock);
1629 	read_lock(&tasklist_lock);
1630 	if (may_ptrace_stop()) {
1631 		do_notify_parent_cldstop(current, CLD_TRAPPED);
1632 		/*
1633 		 * Don't want to allow preemption here, because
1634 		 * sys_ptrace() needs this task to be inactive.
1635 		 *
1636 		 * XXX: implement read_unlock_no_resched().
1637 		 */
1638 		preempt_disable();
1639 		read_unlock(&tasklist_lock);
1640 		preempt_enable_no_resched();
1641 		schedule();
1642 	} else {
1643 		/*
1644 		 * By the time we got the lock, our tracer went away.
1645 		 * Don't drop the lock yet, another tracer may come.
1646 		 */
1647 		__set_current_state(TASK_RUNNING);
1648 		if (clear_code)
1649 			current->exit_code = 0;
1650 		read_unlock(&tasklist_lock);
1651 	}
1652 
1653 	/*
1654 	 * While in TASK_TRACED, we were considered "frozen enough".
1655 	 * Now that we woke up, it's crucial if we're supposed to be
1656 	 * frozen that we freeze now before running anything substantial.
1657 	 */
1658 	try_to_freeze();
1659 
1660 	/*
1661 	 * We are back.  Now reacquire the siglock before touching
1662 	 * last_siginfo, so that we are sure to have synchronized with
1663 	 * any signal-sending on another CPU that wants to examine it.
1664 	 */
1665 	spin_lock_irq(&current->sighand->siglock);
1666 	current->last_siginfo = NULL;
1667 
1668 	/*
1669 	 * Queued signals ignored us while we were stopped for tracing.
1670 	 * So check for any that we should take before resuming user mode.
1671 	 * This sets TIF_SIGPENDING, but never clears it.
1672 	 */
1673 	recalc_sigpending_tsk(current);
1674 }
1675 
1676 void ptrace_notify(int exit_code)
1677 {
1678 	siginfo_t info;
1679 
1680 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1681 
1682 	memset(&info, 0, sizeof info);
1683 	info.si_signo = SIGTRAP;
1684 	info.si_code = exit_code;
1685 	info.si_pid = task_pid_vnr(current);
1686 	info.si_uid = current_uid();
1687 
1688 	/* Let the debugger run.  */
1689 	spin_lock_irq(&current->sighand->siglock);
1690 	ptrace_stop(exit_code, 1, &info);
1691 	spin_unlock_irq(&current->sighand->siglock);
1692 }
1693 
1694 /*
1695  * This performs the stopping for SIGSTOP and other stop signals.
1696  * We have to stop all threads in the thread group.
1697  * Returns nonzero if we've actually stopped and released the siglock.
1698  * Returns zero if we didn't stop and still hold the siglock.
1699  */
1700 static int do_signal_stop(int signr)
1701 {
1702 	struct signal_struct *sig = current->signal;
1703 	int notify;
1704 
1705 	if (!sig->group_stop_count) {
1706 		struct task_struct *t;
1707 
1708 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1709 		    unlikely(signal_group_exit(sig)))
1710 			return 0;
1711 		/*
1712 		 * There is no group stop already in progress.
1713 		 * We must initiate one now.
1714 		 */
1715 		sig->group_exit_code = signr;
1716 
1717 		sig->group_stop_count = 1;
1718 		for (t = next_thread(current); t != current; t = next_thread(t))
1719 			/*
1720 			 * Setting state to TASK_STOPPED for a group
1721 			 * stop is always done with the siglock held,
1722 			 * so this check has no races.
1723 			 */
1724 			if (!(t->flags & PF_EXITING) &&
1725 			    !task_is_stopped_or_traced(t)) {
1726 				sig->group_stop_count++;
1727 				signal_wake_up(t, 0);
1728 			}
1729 	}
1730 	/*
1731 	 * If there are no other threads in the group, or if there is
1732 	 * a group stop in progress and we are the last to stop, report
1733 	 * to the parent.  When ptraced, every thread reports itself.
1734 	 */
1735 	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1736 	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1737 	/*
1738 	 * tracehook_notify_jctl() can drop and reacquire siglock, so
1739 	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
1740 	 * or SIGKILL arrives in between, ->group_stop_count becomes 0.
1741 	 */
1742 	if (sig->group_stop_count) {
1743 		if (!--sig->group_stop_count)
1744 			sig->flags = SIGNAL_STOP_STOPPED;
1745 		current->exit_code = sig->group_exit_code;
1746 		__set_current_state(TASK_STOPPED);
1747 	}
1748 	spin_unlock_irq(&current->sighand->siglock);
1749 
1750 	if (notify) {
1751 		read_lock(&tasklist_lock);
1752 		do_notify_parent_cldstop(current, notify);
1753 		read_unlock(&tasklist_lock);
1754 	}
1755 
1756 	/* Now we don't run again until woken by SIGCONT or SIGKILL */
1757 	do {
1758 		schedule();
1759 	} while (try_to_freeze());
1760 
1761 	tracehook_finish_jctl();
1762 	current->exit_code = 0;
1763 
1764 	return 1;
1765 }
1766 
1767 static int ptrace_signal(int signr, siginfo_t *info,
1768 			 struct pt_regs *regs, void *cookie)
1769 {
1770 	if (!task_ptrace(current))
1771 		return signr;
1772 
1773 	ptrace_signal_deliver(regs, cookie);
1774 
1775 	/* Let the debugger run.  */
1776 	ptrace_stop(signr, 0, info);
1777 
1778 	/* We're back.  Did the debugger cancel the sig?  */
1779 	signr = current->exit_code;
1780 	if (signr == 0)
1781 		return signr;
1782 
1783 	current->exit_code = 0;
1784 
1785 	/* Update the siginfo structure if the signal has
1786 	   changed.  If the debugger wanted something
1787 	   specific in the siginfo structure then it should
1788 	   have updated *info via PTRACE_SETSIGINFO.  */
1789 	if (signr != info->si_signo) {
1790 		info->si_signo = signr;
1791 		info->si_errno = 0;
1792 		info->si_code = SI_USER;
1793 		info->si_pid = task_pid_vnr(current->parent);
1794 		info->si_uid = task_uid(current->parent);
1795 	}
1796 
1797 	/* If the (new) signal is now blocked, requeue it.  */
1798 	if (sigismember(&current->blocked, signr)) {
1799 		specific_send_sig_info(signr, info, current);
1800 		signr = 0;
1801 	}
1802 
1803 	return signr;
1804 }
1805 
1806 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1807 			  struct pt_regs *regs, void *cookie)
1808 {
1809 	struct sighand_struct *sighand = current->sighand;
1810 	struct signal_struct *signal = current->signal;
1811 	int signr;
1812 
1813 relock:
1814 	/*
1815 	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1816 	 * While in TASK_STOPPED, we were considered "frozen enough".
1817 	 * Now that we woke up, it's crucial if we're supposed to be
1818 	 * frozen that we freeze now before running anything substantial.
1819 	 */
1820 	try_to_freeze();
1821 
1822 	spin_lock_irq(&sighand->siglock);
1823 	/*
1824 	 * Every stopped thread goes here after wakeup. Check to see if
1825 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
1826 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1827 	 */
1828 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1829 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1830 				? CLD_CONTINUED : CLD_STOPPED;
1831 		signal->flags &= ~SIGNAL_CLD_MASK;
1832 
1833 		why = tracehook_notify_jctl(why, CLD_CONTINUED);
1834 		spin_unlock_irq(&sighand->siglock);
1835 
1836 		if (why) {
1837 			read_lock(&tasklist_lock);
1838 			do_notify_parent_cldstop(current->group_leader, why);
1839 			read_unlock(&tasklist_lock);
1840 		}
1841 		goto relock;
1842 	}
1843 
1844 	for (;;) {
1845 		struct k_sigaction *ka;
1846 		/*
1847 		 * Tracing can induce an artificial signal and choose sigaction.
1848 		 * The return value in @signr determines the default action,
1849 		 * but @info->si_signo is the signal number we will report.
1850 		 */
1851 		signr = tracehook_get_signal(current, regs, info, return_ka);
1852 		if (unlikely(signr < 0))
1853 			goto relock;
1854 		if (unlikely(signr != 0))
1855 			ka = return_ka;
1856 		else {
1857 			if (unlikely(signal->group_stop_count > 0) &&
1858 			    do_signal_stop(0))
1859 				goto relock;
1860 
1861 			signr = dequeue_signal(current, &current->blocked,
1862 					       info);
1863 
1864 			if (!signr)
1865 				break; /* will return 0 */
1866 
1867 			if (signr != SIGKILL) {
1868 				signr = ptrace_signal(signr, info,
1869 						      regs, cookie);
1870 				if (!signr)
1871 					continue;
1872 			}
1873 
1874 			ka = &sighand->action[signr-1];
1875 		}
1876 
1877 		/* Trace actually delivered signals. */
1878 		trace_signal_deliver(signr, info, ka);
1879 
1880 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1881 			continue;
1882 		if (ka->sa.sa_handler != SIG_DFL) {
1883 			/* Run the handler.  */
1884 			*return_ka = *ka;
1885 
1886 			if (ka->sa.sa_flags & SA_ONESHOT)
1887 				ka->sa.sa_handler = SIG_DFL;
1888 
1889 			break; /* will return non-zero "signr" value */
1890 		}
1891 
1892 		/*
1893 		 * Now we are doing the default action for this signal.
1894 		 */
1895 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
1896 			continue;
1897 
1898 		/*
1899 		 * Global init gets no signals it doesn't want.
1900 		 * Container-init gets no signals it doesn't want from same
1901 		 * container.
1902 		 *
1903 		 * Note that if global/container-init sees a sig_kernel_only()
1904 		 * signal here, the signal must have been generated internally
1905 		 * or must have come from an ancestor namespace. In either
1906 		 * case, the signal cannot be dropped.
1907 		 */
1908 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1909 				!sig_kernel_only(signr))
1910 			continue;
1911 
1912 		if (sig_kernel_stop(signr)) {
1913 			/*
1914 			 * The default action is to stop all threads in
1915 			 * the thread group.  The job control signals
1916 			 * do nothing in an orphaned pgrp, but SIGSTOP
1917 			 * always works.  Note that siglock needs to be
1918 			 * dropped during the call to is_orphaned_pgrp()
1919 			 * because of lock ordering with tasklist_lock.
1920 			 * This allows an intervening SIGCONT to be posted.
1921 			 * We need to check for that and bail out if necessary.
1922 			 */
1923 			if (signr != SIGSTOP) {
1924 				spin_unlock_irq(&sighand->siglock);
1925 
1926 				/* signals can be posted during this window */
1927 
1928 				if (is_current_pgrp_orphaned())
1929 					goto relock;
1930 
1931 				spin_lock_irq(&sighand->siglock);
1932 			}
1933 
1934 			if (likely(do_signal_stop(info->si_signo))) {
1935 				/* It released the siglock.  */
1936 				goto relock;
1937 			}
1938 
1939 			/*
1940 			 * We didn't actually stop, due to a race
1941 			 * with SIGCONT or something like that.
1942 			 */
1943 			continue;
1944 		}
1945 
1946 		spin_unlock_irq(&sighand->siglock);
1947 
1948 		/*
1949 		 * Anything else is fatal, maybe with a core dump.
1950 		 */
1951 		current->flags |= PF_SIGNALED;
1952 
1953 		if (sig_kernel_coredump(signr)) {
1954 			if (print_fatal_signals)
1955 				print_fatal_signal(regs, info->si_signo);
1956 			/*
1957 			 * If it was able to dump core, this kills all
1958 			 * other threads in the group and synchronizes with
1959 			 * their demise.  If we lost the race with another
1960 			 * thread getting here, it set group_exit_code
1961 			 * first and our do_group_exit call below will use
1962 			 * that value and ignore the one we pass it.
1963 			 */
1964 			do_coredump(info->si_signo, info->si_signo, regs);
1965 		}
1966 
1967 		/*
1968 		 * Death signals, no core dump.
1969 		 */
1970 		do_group_exit(info->si_signo);
1971 		/* NOTREACHED */
1972 	}
1973 	spin_unlock_irq(&sighand->siglock);
1974 	return signr;
1975 }
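/*
 * Typical caller (a sketch of the arch-level usage; the exact helper names
 * are per-architecture): each architecture's do_signal() drives delivery
 * roughly like this:
 *
 *	siginfo_t info;
 *	struct k_sigaction ka;
 *	int signr;
 *
 *	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *	if (signr > 0)
 *		handle_signal(signr, &info, &ka, regs);	// set up the user frame
 *
 * A return of 0 means there is nothing to deliver; fatal signals never
 * return at all because do_group_exit() is called above.
 */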
1976 
1977 void exit_signals(struct task_struct *tsk)
1978 {
1979 	int group_stop = 0;
1980 	struct task_struct *t;
1981 
1982 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1983 		tsk->flags |= PF_EXITING;
1984 		return;
1985 	}
1986 
1987 	spin_lock_irq(&tsk->sighand->siglock);
1988 	/*
1989 	 * From now this task is not visible for group-wide signals,
1990 	 * see wants_signal(), do_signal_stop().
1991 	 */
1992 	tsk->flags |= PF_EXITING;
1993 	if (!signal_pending(tsk))
1994 		goto out;
1995 
1996 	/* It could be that __group_complete_signal() chose us to
1997 	 * notify about a group-wide signal. Another thread should be
1998 	 * woken now to take the signal since we will not.
1999 	 */
2000 	for (t = tsk; (t = next_thread(t)) != tsk; )
2001 		if (!signal_pending(t) && !(t->flags & PF_EXITING))
2002 			recalc_sigpending_and_wake(t);
2003 
2004 	if (unlikely(tsk->signal->group_stop_count) &&
2005 			!--tsk->signal->group_stop_count) {
2006 		tsk->signal->flags = SIGNAL_STOP_STOPPED;
2007 		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
2008 	}
2009 out:
2010 	spin_unlock_irq(&tsk->sighand->siglock);
2011 
2012 	if (unlikely(group_stop)) {
2013 		read_lock(&tasklist_lock);
2014 		do_notify_parent_cldstop(tsk, group_stop);
2015 		read_unlock(&tasklist_lock);
2016 	}
2017 }
2018 
2019 EXPORT_SYMBOL(recalc_sigpending);
2020 EXPORT_SYMBOL_GPL(dequeue_signal);
2021 EXPORT_SYMBOL(flush_signals);
2022 EXPORT_SYMBOL(force_sig);
2023 EXPORT_SYMBOL(send_sig);
2024 EXPORT_SYMBOL(send_sig_info);
2025 EXPORT_SYMBOL(sigprocmask);
2026 EXPORT_SYMBOL(block_all_signals);
2027 EXPORT_SYMBOL(unblock_all_signals);
2028 
2029 
2030 /*
2031  * System call entry points.
2032  */
2033 
2034 SYSCALL_DEFINE0(restart_syscall)
2035 {
2036 	struct restart_block *restart = &current_thread_info()->restart_block;
2037 	return restart->fn(restart);
2038 }
2039 
2040 long do_no_restart_syscall(struct restart_block *param)
2041 {
2042 	return -EINTR;
2043 }
2044 
2045 /*
2046  * We don't need to get the kernel lock - this is all local to this
2047  * particular thread. (And that's good, because this is _heavily_
2048  * used by various programs)
2049  */
2050 
2051 /*
2052  * This is also useful for kernel threads that want to temporarily
2053  * (or permanently) block certain signals.
2054  *
2055  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2056  * interface happily blocks "unblockable" signals like SIGKILL
2057  * and friends.
2058  */
2059 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2060 {
2061 	int error;
2062 
2063 	spin_lock_irq(&current->sighand->siglock);
2064 	if (oldset)
2065 		*oldset = current->blocked;
2066 
2067 	error = 0;
2068 	switch (how) {
2069 	case SIG_BLOCK:
2070 		sigorsets(&current->blocked, &current->blocked, set);
2071 		break;
2072 	case SIG_UNBLOCK:
2073 		signandsets(&current->blocked, &current->blocked, set);
2074 		break;
2075 	case SIG_SETMASK:
2076 		current->blocked = *set;
2077 		break;
2078 	default:
2079 		error = -EINVAL;
2080 	}
2081 	recalc_sigpending();
2082 	spin_unlock_irq(&current->sighand->siglock);
2083 
2084 	return error;
2085 }
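/*
 * In-kernel usage sketch (illustrative only): a kernel thread that wants to
 * ignore everything except SIGKILL/SIGSTOP could block the complement set:
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);
 *
 * As the comment above notes, nothing stops a caller from blocking SIGKILL
 * itself through this interface.
 */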
2086 
2087 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2088 		sigset_t __user *, oset, size_t, sigsetsize)
2089 {
2090 	int error = -EINVAL;
2091 	sigset_t old_set, new_set;
2092 
2093 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2094 	if (sigsetsize != sizeof(sigset_t))
2095 		goto out;
2096 
2097 	if (set) {
2098 		error = -EFAULT;
2099 		if (copy_from_user(&new_set, set, sizeof(*set)))
2100 			goto out;
2101 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2102 
2103 		error = sigprocmask(how, &new_set, &old_set);
2104 		if (error)
2105 			goto out;
2106 		if (oset)
2107 			goto set_old;
2108 	} else if (oset) {
2109 		spin_lock_irq(&current->sighand->siglock);
2110 		old_set = current->blocked;
2111 		spin_unlock_irq(&current->sighand->siglock);
2112 
2113 	set_old:
2114 		error = -EFAULT;
2115 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2116 			goto out;
2117 	}
2118 	error = 0;
2119 out:
2120 	return error;
2121 }
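/*
 * User-space view (an illustrative sketch): glibc's sigprocmask() lands in
 * the syscall above.  A common pattern is keeping SIGINT pending across a
 * critical section; do_work() here is just a placeholder:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);	// SIGINT now stays pending
 *	do_work();
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore mask, pending SIGINT fires
 */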
2122 
2123 long do_sigpending(void __user *set, unsigned long sigsetsize)
2124 {
2125 	long error = -EINVAL;
2126 	sigset_t pending;
2127 
2128 	if (sigsetsize > sizeof(sigset_t))
2129 		goto out;
2130 
2131 	spin_lock_irq(&current->sighand->siglock);
2132 	sigorsets(&pending, &current->pending.signal,
2133 		  &current->signal->shared_pending.signal);
2134 	spin_unlock_irq(&current->sighand->siglock);
2135 
2136 	/* Outside the lock because only this thread touches it.  */
2137 	sigandsets(&pending, &current->blocked, &pending);
2138 
2139 	error = -EFAULT;
2140 	if (!copy_to_user(set, &pending, sigsetsize))
2141 		error = 0;
2142 
2143 out:
2144 	return error;
2145 }
2146 
2147 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2148 {
2149 	return do_sigpending(set, sigsetsize);
2150 }
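/*
 * User-space view (an illustrative sketch): sigpending() reports which
 * blocked signals have been raised in the meantime; handle_deferred() is a
 * placeholder:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		handle_deferred();
 */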
2151 
2152 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2153 
2154 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2155 {
2156 	int err;
2157 
2158 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2159 		return -EFAULT;
2160 	if (from->si_code < 0)
2161 		return __copy_to_user(to, from, sizeof(siginfo_t))
2162 			? -EFAULT : 0;
2163 	/*
2164 	 * If you change siginfo_t structure, please be sure
2165 	 * this code is fixed accordingly.
2166 	 * Please remember to update the signalfd_copyinfo() function
2167 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2168 	 * It should never copy any pad contained in the structure
2169 	 * to avoid security leaks, but must copy the generic
2170 	 * 3 ints plus the relevant union member.
2171 	 */
2172 	err = __put_user(from->si_signo, &to->si_signo);
2173 	err |= __put_user(from->si_errno, &to->si_errno);
2174 	err |= __put_user((short)from->si_code, &to->si_code);
2175 	switch (from->si_code & __SI_MASK) {
2176 	case __SI_KILL:
2177 		err |= __put_user(from->si_pid, &to->si_pid);
2178 		err |= __put_user(from->si_uid, &to->si_uid);
2179 		break;
2180 	case __SI_TIMER:
2181 		err |= __put_user(from->si_tid, &to->si_tid);
2182 		err |= __put_user(from->si_overrun, &to->si_overrun);
2183 		err |= __put_user(from->si_ptr, &to->si_ptr);
2184 		break;
2185 	case __SI_POLL:
2186 		err |= __put_user(from->si_band, &to->si_band);
2187 		err |= __put_user(from->si_fd, &to->si_fd);
2188 		break;
2189 	case __SI_FAULT:
2190 		err |= __put_user(from->si_addr, &to->si_addr);
2191 #ifdef __ARCH_SI_TRAPNO
2192 		err |= __put_user(from->si_trapno, &to->si_trapno);
2193 #endif
2194 		break;
2195 	case __SI_CHLD:
2196 		err |= __put_user(from->si_pid, &to->si_pid);
2197 		err |= __put_user(from->si_uid, &to->si_uid);
2198 		err |= __put_user(from->si_status, &to->si_status);
2199 		err |= __put_user(from->si_utime, &to->si_utime);
2200 		err |= __put_user(from->si_stime, &to->si_stime);
2201 		break;
2202 	case __SI_RT: /* This is not generated by the kernel as of now. */
2203 	case __SI_MESGQ: /* But this is */
2204 		err |= __put_user(from->si_pid, &to->si_pid);
2205 		err |= __put_user(from->si_uid, &to->si_uid);
2206 		err |= __put_user(from->si_ptr, &to->si_ptr);
2207 		break;
2208 	default: /* this is just in case for now ... */
2209 		err |= __put_user(from->si_pid, &to->si_pid);
2210 		err |= __put_user(from->si_uid, &to->si_uid);
2211 		break;
2212 	}
2213 	return err;
2214 }
2215 
2216 #endif
2217 
2218 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2219 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2220 		size_t, sigsetsize)
2221 {
2222 	int ret, sig;
2223 	sigset_t these;
2224 	struct timespec ts;
2225 	siginfo_t info;
2226 	long timeout = 0;
2227 
2228 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2229 	if (sigsetsize != sizeof(sigset_t))
2230 		return -EINVAL;
2231 
2232 	if (copy_from_user(&these, uthese, sizeof(these)))
2233 		return -EFAULT;
2234 
2235 	/*
2236 	 * Invert the set of allowed signals to get those we
2237 	 * want to block.
2238 	 */
2239 	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2240 	signotset(&these);
2241 
2242 	if (uts) {
2243 		if (copy_from_user(&ts, uts, sizeof(ts)))
2244 			return -EFAULT;
2245 		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2246 		    || ts.tv_sec < 0)
2247 			return -EINVAL;
2248 	}
2249 
2250 	spin_lock_irq(&current->sighand->siglock);
2251 	sig = dequeue_signal(current, &these, &info);
2252 	if (!sig) {
2253 		timeout = MAX_SCHEDULE_TIMEOUT;
2254 		if (uts)
2255 			timeout = (timespec_to_jiffies(&ts)
2256 				   + (ts.tv_sec || ts.tv_nsec));
2257 
2258 		if (timeout) {
2259 			/* None ready -- temporarily unblock those we're
2260 			 * interested in while we are sleeping, so that we'll
2261 			 * be awakened when they arrive.  */
2262 			current->real_blocked = current->blocked;
2263 			sigandsets(&current->blocked, &current->blocked, &these);
2264 			recalc_sigpending();
2265 			spin_unlock_irq(&current->sighand->siglock);
2266 
2267 			timeout = schedule_timeout_interruptible(timeout);
2268 
2269 			spin_lock_irq(&current->sighand->siglock);
2270 			sig = dequeue_signal(current, &these, &info);
2271 			current->blocked = current->real_blocked;
2272 			siginitset(&current->real_blocked, 0);
2273 			recalc_sigpending();
2274 		}
2275 	}
2276 	spin_unlock_irq(&current->sighand->siglock);
2277 
2278 	if (sig) {
2279 		ret = sig;
2280 		if (uinfo) {
2281 			if (copy_siginfo_to_user(uinfo, &info))
2282 				ret = -EFAULT;
2283 		}
2284 	} else {
2285 		ret = -EAGAIN;
2286 		if (timeout)
2287 			ret = -EINTR;
2288 	}
2289 
2290 	return ret;
2291 }
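/*
 * User-space view (an illustrative sketch): sigtimedwait() consumes signals
 * synchronously instead of through handlers.  The signal must be blocked
 * first, mirroring the temporary-unblock dance in the code above:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 */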
2292 
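/*
 * The pid argument follows the usual kill(2) conventions, implemented by
 * kill_something_info(): pid > 0 signals that process, pid == 0 signals
 * every process in the caller's process group, pid == -1 signals every
 * process the caller has permission to signal, and pid < -1 signals the
 * process group -pid.
 */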
2293 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2294 {
2295 	struct siginfo info;
2296 
2297 	info.si_signo = sig;
2298 	info.si_errno = 0;
2299 	info.si_code = SI_USER;
2300 	info.si_pid = task_tgid_vnr(current);
2301 	info.si_uid = current_uid();
2302 
2303 	return kill_something_info(sig, &info, pid);
2304 }
2305 
2306 static int
2307 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2308 {
2309 	struct task_struct *p;
2310 	int error = -ESRCH;
2311 
2312 	rcu_read_lock();
2313 	p = find_task_by_vpid(pid);
2314 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2315 		error = check_kill_permission(sig, info, p);
2316 		/*
2317 		 * The null signal is a permissions and process existence
2318 		 * probe.  No signal is actually delivered.
2319 		 */
2320 		if (!error && sig) {
2321 			error = do_send_sig_info(sig, info, p, false);
2322 			/*
2323 			 * If lock_task_sighand() failed we pretend the task
2324 			 * dies after receiving the signal. The window is tiny,
2325 			 * and the signal is private anyway.
2326 			 */
2327 			if (unlikely(error == -ESRCH))
2328 				error = 0;
2329 		}
2330 	}
2331 	rcu_read_unlock();
2332 
2333 	return error;
2334 }
2335 
2336 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2337 {
2338 	struct siginfo info;
2339 
2340 	info.si_signo = sig;
2341 	info.si_errno = 0;
2342 	info.si_code = SI_TKILL;
2343 	info.si_pid = task_tgid_vnr(current);
2344 	info.si_uid = current_uid();
2345 
2346 	return do_send_specific(tgid, pid, sig, &info);
2347 }
2348 
2349 /**
2350  *  sys_tgkill - send signal to one specific thread
2351  *  @tgid: the thread group ID of the thread
2352  *  @pid: the PID of the thread
2353  *  @sig: signal to be sent
2354  *
2355  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2356  *  exists but no longer belongs to the target process. This
2357  *  method solves the problem of threads exiting and PIDs getting reused.
2358  */
2359 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2360 {
2361 	/* This is only valid for single tasks */
2362 	if (pid <= 0 || tgid <= 0)
2363 		return -EINVAL;
2364 
2365 	return do_tkill(tgid, pid, sig);
2366 }
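/*
 * User-space view (an illustrative sketch): glibc does not expose a tgkill()
 * wrapper for this, so callers (NPTL's pthread_kill() included) go through
 * syscall():
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *
 * Supplying the tgid as well is what closes the PID-reuse race described in
 * the comment above.
 */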
2367 
2368 /*
2369  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2370  */
2371 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2372 {
2373 	/* This is only valid for single tasks */
2374 	if (pid <= 0)
2375 		return -EINVAL;
2376 
2377 	return do_tkill(0, pid, sig);
2378 }
2379 
2380 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2381 		siginfo_t __user *, uinfo)
2382 {
2383 	siginfo_t info;
2384 
2385 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2386 		return -EFAULT;
2387 
2388 	/* Not even root can pretend to send signals from the kernel.
2389 	   Nor can they impersonate a kill(), which adds source info.  */
2390 	if (info.si_code >= 0)
2391 		return -EPERM;
2392 	info.si_signo = sig;
2393 
2394 	/* POSIX.1b doesn't mention process groups.  */
2395 	return kill_proc_info(sig, &info, pid);
2396 }
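/*
 * User-space view (an illustrative sketch): glibc's sigqueue() is built on
 * this syscall.  It fills in si_code = SI_QUEUE, which is negative and so
 * passes the check above, along with the caller-supplied payload
 * (target_pid is a placeholder):
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGUSR1, val);	// receiver sees val in si_value
 */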
2397 
2398 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2399 {
2400 	/* This is only valid for single tasks */
2401 	if (pid <= 0 || tgid <= 0)
2402 		return -EINVAL;
2403 
2404 	/* Not even root can pretend to send signals from the kernel.
2405 	   Nor can they impersonate a kill(), which adds source info.  */
2406 	if (info->si_code >= 0)
2407 		return -EPERM;
2408 	info->si_signo = sig;
2409 
2410 	return do_send_specific(tgid, pid, sig, info);
2411 }
2412 
2413 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2414 		siginfo_t __user *, uinfo)
2415 {
2416 	siginfo_t info;
2417 
2418 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2419 		return -EFAULT;
2420 
2421 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2422 }
2423 
2424 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2425 {
2426 	struct task_struct *t = current;
2427 	struct k_sigaction *k;
2428 	sigset_t mask;
2429 
2430 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2431 		return -EINVAL;
2432 
2433 	k = &t->sighand->action[sig-1];
2434 
2435 	spin_lock_irq(&current->sighand->siglock);
2436 	if (oact)
2437 		*oact = *k;
2438 
2439 	if (act) {
2440 		sigdelsetmask(&act->sa.sa_mask,
2441 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2442 		*k = *act;
2443 		/*
2444 		 * POSIX 3.3.1.3:
2445 		 *  "Setting a signal action to SIG_IGN for a signal that is
2446 		 *   pending shall cause the pending signal to be discarded,
2447 		 *   whether or not it is blocked."
2448 		 *
2449 		 *  "Setting a signal action to SIG_DFL for a signal that is
2450 		 *   pending and whose default action is to ignore the signal
2451 		 *   (for example, SIGCHLD), shall cause the pending signal to
2452 		 *   be discarded, whether or not it is blocked"
2453 		 */
2454 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2455 			sigemptyset(&mask);
2456 			sigaddset(&mask, sig);
2457 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2458 			do {
2459 				rm_from_queue_full(&mask, &t->pending);
2460 				t = next_thread(t);
2461 			} while (t != current);
2462 		}
2463 	}
2464 
2465 	spin_unlock_irq(&current->sighand->siglock);
2466 	return 0;
2467 }
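/*
 * User-space view (an illustrative sketch): sys_rt_sigaction() below funnels
 * into do_sigaction().  Installing a handler typically looks like this, with
 * on_sigterm standing in for a real handler:
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = on_sigterm;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGINT);		// also block SIGINT in the handler
 *	sigaction(SIGTERM, &sa, NULL);
 *
 * Note the POSIX rule quoted above: changing a pending, ignorable signal to
 * SIG_IGN or SIG_DFL discards it from every thread's queue.
 */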
2468 
2469 int
2470 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2471 {
2472 	stack_t oss;
2473 	int error;
2474 
2475 	oss.ss_sp = (void __user *) current->sas_ss_sp;
2476 	oss.ss_size = current->sas_ss_size;
2477 	oss.ss_flags = sas_ss_flags(sp);
2478 
2479 	if (uss) {
2480 		void __user *ss_sp;
2481 		size_t ss_size;
2482 		int ss_flags;
2483 
2484 		error = -EFAULT;
2485 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2486 			goto out;
2487 		error = __get_user(ss_sp, &uss->ss_sp) |
2488 			__get_user(ss_flags, &uss->ss_flags) |
2489 			__get_user(ss_size, &uss->ss_size);
2490 		if (error)
2491 			goto out;
2492 
2493 		error = -EPERM;
2494 		if (on_sig_stack(sp))
2495 			goto out;
2496 
2497 		error = -EINVAL;
2498 		/*
2499 		 * Note: this code used to test ss_flags incorrectly.
2500 		 * Old code may have been written using ss_flags==0
2501 		 * to mean ss_flags==SS_ONSTACK (as this was the only
2502 		 * way that worked), so the check below preserves
2503 		 * that older mechanism instead of rejecting a zero
2504 		 * ss_flags value.
2505 		 */
2506 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2507 			goto out;
2508 
2509 		if (ss_flags == SS_DISABLE) {
2510 			ss_size = 0;
2511 			ss_sp = NULL;
2512 		} else {
2513 			error = -ENOMEM;
2514 			if (ss_size < MINSIGSTKSZ)
2515 				goto out;
2516 		}
2517 
2518 		current->sas_ss_sp = (unsigned long) ss_sp;
2519 		current->sas_ss_size = ss_size;
2520 	}
2521 
2522 	error = 0;
2523 	if (uoss) {
2524 		error = -EFAULT;
2525 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2526 			goto out;
2527 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2528 			__put_user(oss.ss_size, &uoss->ss_size) |
2529 			__put_user(oss.ss_flags, &uoss->ss_flags);
2530 	}
2531 
2532 out:
2533 	return error;
2534 }
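/*
 * User-space view (an illustrative sketch): an alternate stack lets a
 * SIGSEGV handler run even after the normal stack has overflowed; on_segv
 * stands in for a real handler:
 *
 *	static char altstack[SIGSTKSZ];
 *	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
 *	struct sigaction sa = { 0 };
 *
 *	sigaltstack(&ss, NULL);
 *	sa.sa_handler = on_segv;
 *	sa.sa_flags = SA_ONSTACK;		// run the handler on the stack above
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * The -EPERM above refuses to reconfigure the stack while we are currently
 * executing on it.
 */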
2535 
2536 #ifdef __ARCH_WANT_SYS_SIGPENDING
2537 
2538 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2539 {
2540 	return do_sigpending(set, sizeof(*set));
2541 }
2542 
2543 #endif
2544 
2545 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2546 /* Some platforms have their own version with special arguments; others
2547    support only sys_rt_sigprocmask.  */
2548 
2549 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2550 		old_sigset_t __user *, oset)
2551 {
2552 	int error;
2553 	old_sigset_t old_set, new_set;
2554 
2555 	if (set) {
2556 		error = -EFAULT;
2557 		if (copy_from_user(&new_set, set, sizeof(*set)))
2558 			goto out;
2559 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2560 
2561 		spin_lock_irq(&current->sighand->siglock);
2562 		old_set = current->blocked.sig[0];
2563 
2564 		error = 0;
2565 		switch (how) {
2566 		default:
2567 			error = -EINVAL;
2568 			break;
2569 		case SIG_BLOCK:
2570 			sigaddsetmask(&current->blocked, new_set);
2571 			break;
2572 		case SIG_UNBLOCK:
2573 			sigdelsetmask(&current->blocked, new_set);
2574 			break;
2575 		case SIG_SETMASK:
2576 			current->blocked.sig[0] = new_set;
2577 			break;
2578 		}
2579 
2580 		recalc_sigpending();
2581 		spin_unlock_irq(&current->sighand->siglock);
2582 		if (error)
2583 			goto out;
2584 		if (oset)
2585 			goto set_old;
2586 	} else if (oset) {
2587 		old_set = current->blocked.sig[0];
2588 	set_old:
2589 		error = -EFAULT;
2590 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
2591 			goto out;
2592 	}
2593 	error = 0;
2594 out:
2595 	return error;
2596 }
2597 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2598 
2599 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2600 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2601 		const struct sigaction __user *, act,
2602 		struct sigaction __user *, oact,
2603 		size_t, sigsetsize)
2604 {
2605 	struct k_sigaction new_sa, old_sa;
2606 	int ret = -EINVAL;
2607 
2608 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2609 	if (sigsetsize != sizeof(sigset_t))
2610 		goto out;
2611 
2612 	if (act) {
2613 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2614 			return -EFAULT;
2615 	}
2616 
2617 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2618 
2619 	if (!ret && oact) {
2620 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2621 			return -EFAULT;
2622 	}
2623 out:
2624 	return ret;
2625 }
2626 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2627 
2628 #ifdef __ARCH_WANT_SYS_SGETMASK
2629 
2630 /*
2631  * For backwards compatibility.  Functionality superseded by sigprocmask.
2632  */
2633 SYSCALL_DEFINE0(sgetmask)
2634 {
2635 	/* SMP safe */
2636 	return current->blocked.sig[0];
2637 }
2638 
2639 SYSCALL_DEFINE1(ssetmask, int, newmask)
2640 {
2641 	int old;
2642 
2643 	spin_lock_irq(&current->sighand->siglock);
2644 	old = current->blocked.sig[0];
2645 
2646 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2647 						  sigmask(SIGSTOP)));
2648 	recalc_sigpending();
2649 	spin_unlock_irq(&current->sighand->siglock);
2650 
2651 	return old;
2652 }
2653 #endif /* __ARCH_WANT_SYS_SGETMASK */
2654 
2655 #ifdef __ARCH_WANT_SYS_SIGNAL
2656 /*
2657  * For backwards compatibility.  Functionality superseded by sigaction.
2658  */
2659 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2660 {
2661 	struct k_sigaction new_sa, old_sa;
2662 	int ret;
2663 
2664 	new_sa.sa.sa_handler = handler;
2665 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2666 	sigemptyset(&new_sa.sa.sa_mask);
2667 
2668 	ret = do_sigaction(sig, &new_sa, &old_sa);
2669 
2670 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2671 }
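/*
 * SA_ONESHOT | SA_NOMASK above reproduces the old SysV signal() semantics:
 * the handler is reset to SIG_DFL as it is delivered and the signal is not
 * blocked while the handler runs, which is why portable code uses
 * sigaction() instead.
 */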
2672 #endif /* __ARCH_WANT_SYS_SIGNAL */
2673 
2674 #ifdef __ARCH_WANT_SYS_PAUSE
2675 
2676 SYSCALL_DEFINE0(pause)
2677 {
2678 	current->state = TASK_INTERRUPTIBLE;
2679 	schedule();
2680 	return -ERESTARTNOHAND;
2681 }
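/*
 * -ERESTARTNOHAND tells the arch signal code to restart pause() only when no
 * user handler was actually invoked; if one does run, the return value is
 * rewritten to -EINTR, matching the requirement that pause() returns once a
 * signal has been caught.
 */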
2682 
2683 #endif
2684 
2685 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2686 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2687 {
2688 	sigset_t newset;
2689 
2690 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2691 	if (sigsetsize != sizeof(sigset_t))
2692 		return -EINVAL;
2693 
2694 	if (copy_from_user(&newset, unewset, sizeof(newset)))
2695 		return -EFAULT;
2696 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2697 
2698 	spin_lock_irq(&current->sighand->siglock);
2699 	current->saved_sigmask = current->blocked;
2700 	current->blocked = newset;
2701 	recalc_sigpending();
2702 	spin_unlock_irq(&current->sighand->siglock);
2703 
2704 	current->state = TASK_INTERRUPTIBLE;
2705 	schedule();
2706 	set_restore_sigmask();
2707 	return -ERESTARTNOHAND;
2708 }
2709 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
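/*
 * User-space view (an illustrative sketch): sigsuspend() closes the classic
 * check-then-sleep race by unblocking a signal only inside the atomic wait;
 * child_exited is a placeholder flag set by a SIGCHLD handler:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)
 *		sigsuspend(&old);		// atomically unblock SIGCHLD and sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * The saved_sigmask/set_restore_sigmask() pair above is what arranges for
 * the caller's original mask to come back only after any handler has run on
 * the way out to user space.
 */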
2710 
2711 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2712 {
2713 	return NULL;
2714 }
2715 
2716 void __init signals_init(void)
2717 {
2718 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2719 }
2720