xref: /openbmc/linux/kernel/signal.c (revision 81d67439)
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *		Changes to use preallocated sigqueue structures
10  *		to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33 
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"	/* audit_signal_info() */
39 
40 /*
41  * SLAB caches for signal bits.
42  */
43 
44 static struct kmem_cache *sigqueue_cachep;
45 
46 int print_fatal_signals __read_mostly;
47 
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50 	return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52 
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55 	/* Is it explicitly or implicitly ignored? */
56 	return handler == SIG_IGN ||
57 		(handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59 
60 static int sig_task_ignored(struct task_struct *t, int sig,
61 		int from_ancestor_ns)
62 {
63 	void __user *handler;
64 
65 	handler = sig_handler(t, sig);
66 
67 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68 			handler == SIG_DFL && !from_ancestor_ns)
69 		return 1;
70 
71 	return sig_handler_ignored(handler, sig);
72 }
73 
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76 	/*
77 	 * Blocked signals are never ignored, since the
78 	 * signal handler may change by the time it is
79 	 * unblocked.
80 	 */
81 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82 		return 0;
83 
84 	if (!sig_task_ignored(t, sig, from_ancestor_ns))
85 		return 0;
86 
87 	/*
88 	 * Tracers may want to know about even ignored signals.
89 	 */
90 	return !t->ptrace;
91 }
92 
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99 	unsigned long ready;
100 	long i;
101 
102 	switch (_NSIG_WORDS) {
103 	default:
104 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105 			ready |= signal->sig[i] &~ blocked->sig[i];
106 		break;
107 
108 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109 		ready |= signal->sig[2] &~ blocked->sig[2];
110 		ready |= signal->sig[1] &~ blocked->sig[1];
111 		ready |= signal->sig[0] &~ blocked->sig[0];
112 		break;
113 
114 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115 		ready |= signal->sig[0] &~ blocked->sig[0];
116 		break;
117 
118 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119 	}
120 	return ready != 0;
121 }
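/*
 * Worked example (illustrative only): with _NSIG == 64 on a 64-bit
 * architecture, _NSIG_WORDS is 1 and the whole test above collapses to
 * a single "signal->sig[0] & ~blocked->sig[0]".  A pending but blocked
 * SIGINT therefore contributes nothing to "ready", which is why a
 * blocked signal never marks a task as having work to do by itself.
 */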
122 
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124 
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
128 	    PENDING(&t->pending, &t->blocked) ||
129 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
130 		set_tsk_thread_flag(t, TIF_SIGPENDING);
131 		return 1;
132 	}
133 	/*
134 	 * We must never clear the flag in another thread, or in current
135 	 * when it's possible the current syscall is returning -ERESTART*.
136 	 * So we don't clear it here; only callers that know it is safe do so.
137 	 */
138 	return 0;
139 }
140 
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current; the wakeup is then a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147 	if (recalc_sigpending_tsk(t))
148 		signal_wake_up(t, 0);
149 }
150 
151 void recalc_sigpending(void)
152 {
153 	if (!recalc_sigpending_tsk(current) && !freezing(current))
154 		clear_thread_flag(TIF_SIGPENDING);
155 
156 }
157 
158 /* Given the mask, find the first available signal that should be serviced. */
159 
160 #define SYNCHRONOUS_MASK \
161 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
162 	 sigmask(SIGTRAP) | sigmask(SIGFPE))
163 
164 int next_signal(struct sigpending *pending, sigset_t *mask)
165 {
166 	unsigned long i, *s, *m, x;
167 	int sig = 0;
168 
169 	s = pending->signal.sig;
170 	m = mask->sig;
171 
172 	/*
173 	 * Handle the first word specially: it contains the
174 	 * synchronous signals that need to be dequeued first.
175 	 */
176 	x = *s &~ *m;
177 	if (x) {
178 		if (x & SYNCHRONOUS_MASK)
179 			x &= SYNCHRONOUS_MASK;
180 		sig = ffz(~x) + 1;
181 		return sig;
182 	}
183 
184 	switch (_NSIG_WORDS) {
185 	default:
186 		for (i = 1; i < _NSIG_WORDS; ++i) {
187 			x = *++s &~ *++m;
188 			if (!x)
189 				continue;
190 			sig = ffz(~x) + i*_NSIG_BPW + 1;
191 			break;
192 		}
193 		break;
194 
195 	case 2:
196 		x = s[1] &~ m[1];
197 		if (!x)
198 			break;
199 		sig = ffz(~x) + _NSIG_BPW + 1;
200 		break;
201 
202 	case 1:
203 		/* Nothing to do */
204 		break;
205 	}
206 
207 	return sig;
208 }
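/*
 * Ordering example (illustrative only): if SIGUSR1 (10) and SIGSEGV
 * (11) are both pending and unblocked, plain "ffz(~x) + 1" would pick
 * the lower-numbered SIGUSR1, but the SYNCHRONOUS_MASK narrowing above
 * makes next_signal() return SIGSEGV first, so synchronous faults are
 * reported ahead of unrelated asynchronous signals.
 */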
209 
210 static inline void print_dropped_signal(int sig)
211 {
212 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
213 
214 	if (!print_fatal_signals)
215 		return;
216 
217 	if (!__ratelimit(&ratelimit_state))
218 		return;
219 
220 	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
221 				current->comm, current->pid, sig);
222 }
223 
224 /**
225  * task_set_jobctl_pending - set jobctl pending bits
226  * @task: target task
227  * @mask: pending bits to set
228  *
229  * Set @mask in @task->jobctl.  @mask must be a subset of
230  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
231  * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
232  * cleared.  If @task is already being killed or exiting, this function
233  * becomes a no-op.
234  *
235  * CONTEXT:
236  * Must be called with @task->sighand->siglock held.
237  *
238  * RETURNS:
239  * %true if @mask is set, %false if it was a no-op because @task was dying.
240  */
241 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
242 {
243 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
244 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
245 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
246 
247 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
248 		return false;
249 
250 	if (mask & JOBCTL_STOP_SIGMASK)
251 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
252 
253 	task->jobctl |= mask;
254 	return true;
255 }
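/*
 * Hypothetical caller sketch (it mirrors what do_signal_stop() does
 * further down): a stop initiator sets the signo, PENDING and CONSUME
 * bits in one call,
 *
 *	task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING |
 *				   JOBCTL_STOP_CONSUME);
 *
 * and uses the %false return to skip threads that are already dying.
 */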
256 
257 /**
258  * task_clear_jobctl_trapping - clear jobctl trapping bit
259  * @task: target task
260  *
261  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
262  * Clear it and wake up the ptracer.  Note that we don't need any further
263  * locking.  @task->siglock guarantees that @task->parent points to the
264  * ptracer.
265  *
266  * CONTEXT:
267  * Must be called with @task->sighand->siglock held.
268  */
269 void task_clear_jobctl_trapping(struct task_struct *task)
270 {
271 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
272 		task->jobctl &= ~JOBCTL_TRAPPING;
273 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
274 	}
275 }
276 
277 /**
278  * task_clear_jobctl_pending - clear jobctl pending bits
279  * @task: target task
280  * @mask: pending bits to clear
281  *
282  * Clear @mask from @task->jobctl.  @mask must be subset of
283  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
284  * STOP bits are cleared together.
285  *
286  * If clearing of @mask leaves no stop or trap pending, this function calls
287  * task_clear_jobctl_trapping().
288  *
289  * CONTEXT:
290  * Must be called with @task->sighand->siglock held.
291  */
292 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
293 {
294 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
295 
296 	if (mask & JOBCTL_STOP_PENDING)
297 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
298 
299 	task->jobctl &= ~mask;
300 
301 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
302 		task_clear_jobctl_trapping(task);
303 }
304 
305 /**
306  * task_participate_group_stop - participate in a group stop
307  * @task: task participating in a group stop
308  *
309  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
310  * Group stop states are cleared and the group stop count is consumed if
311  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
312  * stop, the appropriate %SIGNAL_* flags are set.
313  *
314  * CONTEXT:
315  * Must be called with @task->sighand->siglock held.
316  *
317  * RETURNS:
318  * %true if group stop completion should be notified to the parent, %false
319  * otherwise.
320  */
321 static bool task_participate_group_stop(struct task_struct *task)
322 {
323 	struct signal_struct *sig = task->signal;
324 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
325 
326 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
327 
328 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
329 
330 	if (!consume)
331 		return false;
332 
333 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
334 		sig->group_stop_count--;
335 
336 	/*
337 	 * Tell the caller to notify completion iff we are entering into a
338 	 * fresh group stop.  Read comment in do_signal_stop() for details.
339 	 */
340 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
341 		sig->flags = SIGNAL_STOP_STOPPED;
342 		return true;
343 	}
344 	return false;
345 }
346 
347 /*
348  * allocate a new signal queue record
349  * - this may be called without locks if and only if t == current, otherwise an
350  *   appropriate lock must be held to stop the target task from exiting
351  */
352 static struct sigqueue *
353 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
354 {
355 	struct sigqueue *q = NULL;
356 	struct user_struct *user;
357 
358 	/*
359 	 * Protect access to @t credentials. This can go away when all
360 	 * callers hold rcu read lock.
361 	 */
362 	rcu_read_lock();
363 	user = get_uid(__task_cred(t)->user);
364 	atomic_inc(&user->sigpending);
365 	rcu_read_unlock();
366 
367 	if (override_rlimit ||
368 	    atomic_read(&user->sigpending) <=
369 			task_rlimit(t, RLIMIT_SIGPENDING)) {
370 		q = kmem_cache_alloc(sigqueue_cachep, flags);
371 	} else {
372 		print_dropped_signal(sig);
373 	}
374 
375 	if (unlikely(q == NULL)) {
376 		atomic_dec(&user->sigpending);
377 		free_uid(user);
378 	} else {
379 		INIT_LIST_HEAD(&q->list);
380 		q->flags = 0;
381 		q->user = user;
382 	}
383 
384 	return q;
385 }
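/*
 * Note the accounting order above: user->sigpending is incremented
 * before the RLIMIT_SIGPENDING comparison, so the accounting errs on
 * the safe side under races; on failure the increment is undone
 * together with the uid reference in the q == NULL branch.
 */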
386 
387 static void __sigqueue_free(struct sigqueue *q)
388 {
389 	if (q->flags & SIGQUEUE_PREALLOC)
390 		return;
391 	atomic_dec(&q->user->sigpending);
392 	free_uid(q->user);
393 	kmem_cache_free(sigqueue_cachep, q);
394 }
395 
396 void flush_sigqueue(struct sigpending *queue)
397 {
398 	struct sigqueue *q;
399 
400 	sigemptyset(&queue->signal);
401 	while (!list_empty(&queue->list)) {
402 		q = list_entry(queue->list.next, struct sigqueue , list);
403 		list_del_init(&q->list);
404 		__sigqueue_free(q);
405 	}
406 }
407 
408 /*
409  * Flush all pending signals for a task.
410  */
411 void __flush_signals(struct task_struct *t)
412 {
413 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
414 	flush_sigqueue(&t->pending);
415 	flush_sigqueue(&t->signal->shared_pending);
416 }
417 
418 void flush_signals(struct task_struct *t)
419 {
420 	unsigned long flags;
421 
422 	spin_lock_irqsave(&t->sighand->siglock, flags);
423 	__flush_signals(t);
424 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
425 }
426 
427 static void __flush_itimer_signals(struct sigpending *pending)
428 {
429 	sigset_t signal, retain;
430 	struct sigqueue *q, *n;
431 
432 	signal = pending->signal;
433 	sigemptyset(&retain);
434 
435 	list_for_each_entry_safe(q, n, &pending->list, list) {
436 		int sig = q->info.si_signo;
437 
438 		if (likely(q->info.si_code != SI_TIMER)) {
439 			sigaddset(&retain, sig);
440 		} else {
441 			sigdelset(&signal, sig);
442 			list_del_init(&q->list);
443 			__sigqueue_free(q);
444 		}
445 	}
446 
447 	sigorsets(&pending->signal, &signal, &retain);
448 }
449 
450 void flush_itimer_signals(void)
451 {
452 	struct task_struct *tsk = current;
453 	unsigned long flags;
454 
455 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
456 	__flush_itimer_signals(&tsk->pending);
457 	__flush_itimer_signals(&tsk->signal->shared_pending);
458 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
459 }
460 
461 void ignore_signals(struct task_struct *t)
462 {
463 	int i;
464 
465 	for (i = 0; i < _NSIG; ++i)
466 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
467 
468 	flush_signals(t);
469 }
470 
471 /*
472  * Flush all handlers for a task.
473  */
474 
475 void
476 flush_signal_handlers(struct task_struct *t, int force_default)
477 {
478 	int i;
479 	struct k_sigaction *ka = &t->sighand->action[0];
480 	for (i = _NSIG ; i != 0 ; i--) {
481 		if (force_default || ka->sa.sa_handler != SIG_IGN)
482 			ka->sa.sa_handler = SIG_DFL;
483 		ka->sa.sa_flags = 0;
484 		sigemptyset(&ka->sa.sa_mask);
485 		ka++;
486 	}
487 }
488 
489 int unhandled_signal(struct task_struct *tsk, int sig)
490 {
491 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
492 	if (is_global_init(tsk))
493 		return 1;
494 	if (handler != SIG_IGN && handler != SIG_DFL)
495 		return 0;
496 	/* if ptraced, let the tracer determine */
497 	return !tsk->ptrace;
498 }
499 
500 /*
501  * Notify the system that a driver wants to block all signals for this
502  * process, and wants to be notified if any signals at all were to be
503  * sent/acted upon.  If the notifier routine returns non-zero, then the
504  * signal will be acted upon after all.  If the notifier routine returns 0,
505  * then the signal will be blocked.  Only one block per process is
506  * allowed.  priv is a pointer to private data that the notifier routine
507  * can use to determine if the signal should be blocked or not.
508  */
509 void
510 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
511 {
512 	unsigned long flags;
513 
514 	spin_lock_irqsave(&current->sighand->siglock, flags);
515 	current->notifier_mask = mask;
516 	current->notifier_data = priv;
517 	current->notifier = notifier;
518 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
519 }
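/*
 * Hypothetical driver-side sketch -- my_notifier, my_dev and hw_busy
 * are invented names, not kernel API.  A driver that must not be
 * disturbed while it holds hardware state could do:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->hw_busy;	(nonzero: deliver the signal)
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	...critical section...
 *	unblock_all_signals();
 *
 * Only signals that are members of @mask consult the notifier; see
 * __dequeue_signal() below.
 */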
520 
521 /* Notify the system that blocking has ended. */
522 
523 void
524 unblock_all_signals(void)
525 {
526 	unsigned long flags;
527 
528 	spin_lock_irqsave(&current->sighand->siglock, flags);
529 	current->notifier = NULL;
530 	current->notifier_data = NULL;
531 	recalc_sigpending();
532 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
533 }
534 
535 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
536 {
537 	struct sigqueue *q, *first = NULL;
538 
539 	/*
540 	 * Collect the siginfo appropriate to this signal.  Check if
541 	 * there is another siginfo for the same signal.
542 	 */
543 	list_for_each_entry(q, &list->list, list) {
544 		if (q->info.si_signo == sig) {
545 			if (first)
546 				goto still_pending;
547 			first = q;
548 		}
549 	}
550 
551 	sigdelset(&list->signal, sig);
552 
553 	if (first) {
554 still_pending:
555 		list_del_init(&first->list);
556 		copy_siginfo(info, &first->info);
557 		__sigqueue_free(first);
558 	} else {
559 		/*
560 		 * Ok, it wasn't in the queue.  This must be
561 		 * a fast-pathed signal or we must have been
562 		 * out of queue space.  So zero out the info.
563 		 */
564 		info->si_signo = sig;
565 		info->si_errno = 0;
566 		info->si_code = SI_USER;
567 		info->si_pid = 0;
568 		info->si_uid = 0;
569 	}
570 }
571 
572 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
573 			siginfo_t *info)
574 {
575 	int sig = next_signal(pending, mask);
576 
577 	if (sig) {
578 		if (current->notifier) {
579 			if (sigismember(current->notifier_mask, sig)) {
580 				if (!(current->notifier)(current->notifier_data)) {
581 					clear_thread_flag(TIF_SIGPENDING);
582 					return 0;
583 				}
584 			}
585 		}
586 
587 		collect_signal(sig, pending, info);
588 	}
589 
590 	return sig;
591 }
592 
593 /*
594  * Dequeue a signal and return the element to the caller, which is
595  * expected to free it.
596  *
597  * All callers have to hold the siglock.
598  */
599 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
600 {
601 	int signr;
602 
603 	/* We only dequeue private signals from ourselves; we don't let
604 	 * signalfd steal them.
605 	 */
606 	signr = __dequeue_signal(&tsk->pending, mask, info);
607 	if (!signr) {
608 		signr = __dequeue_signal(&tsk->signal->shared_pending,
609 					 mask, info);
610 		/*
611 		 * itimer signal?
612 		 *
613 		 * itimers are process shared and we restart periodic
614 		 * itimers in the signal delivery path to prevent DoS
615 		 * attacks in the high resolution timer case. This is
616 		 * compliant with the old way of self-restarting
617 		 * itimers, as the SIGALRM is a legacy signal and only
618 		 * queued once. Changing the restart behaviour so that
619 		 * the timer is restarted in the signal dequeue path
620 		 * also reduces timer noise on heavily loaded !highres
621 		 * systems.
622 		 */
623 		if (unlikely(signr == SIGALRM)) {
624 			struct hrtimer *tmr = &tsk->signal->real_timer;
625 
626 			if (!hrtimer_is_queued(tmr) &&
627 			    tsk->signal->it_real_incr.tv64 != 0) {
628 				hrtimer_forward(tmr, tmr->base->get_time(),
629 						tsk->signal->it_real_incr);
630 				hrtimer_restart(tmr);
631 			}
632 		}
633 	}
634 
635 	recalc_sigpending();
636 	if (!signr)
637 		return 0;
638 
639 	if (unlikely(sig_kernel_stop(signr))) {
640 		/*
641 		 * Set a marker that we have dequeued a stop signal.  Our
642 		 * caller might release the siglock and then the pending
643 		 * stop signal it is about to process is no longer in the
644 		 * pending bitmasks, but must still be cleared by a SIGCONT
645 		 * (and overruled by a SIGKILL).  So those cases clear this
646 		 * shared flag after we've set it.  Note that this flag may
647 		 * remain set after the signal we return is ignored or
648 		 * handled.  That doesn't matter because its only purpose
649 		 * is to alert stop-signal processing code when another
650 		 * processor has come along and cleared the flag.
651 		 */
652 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
653 	}
654 	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
655 		/*
656 		 * Release the siglock to ensure proper locking order
657 		 * of timer locks outside of siglocks.  Note, we leave
658 		 * irqs disabled here, since the posix-timers code is
659 		 * about to disable them again anyway.
660 		 */
661 		spin_unlock(&tsk->sighand->siglock);
662 		do_schedule_next_timer(info);
663 		spin_lock(&tsk->sighand->siglock);
664 	}
665 	return signr;
666 }
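/*
 * Caller sketch (siglock must be held, as noted above):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * which is essentially what get_signal_to_deliver() does in its
 * delivery loop.
 */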
667 
668 /*
669  * Tell a process that it has a new active signal.
670  *
671  * NOTE! we rely on the previous spin_lock to
672  * lock interrupts for us! We can only be called with
673  * "siglock" held, and local interrupts must
674  * have been disabled when it was acquired!
675  *
676  * No need to set need_resched since signal event passing
677  * goes through ->blocked
678  */
679 void signal_wake_up(struct task_struct *t, int resume)
680 {
681 	unsigned int mask;
682 
683 	set_tsk_thread_flag(t, TIF_SIGPENDING);
684 
685 	/*
686 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
687 	 * case. We don't check t->state here because there is a race with it
688 	 * executing on another processor and just now entering stopped state.
689 	 * By using wake_up_state, we ensure the process will wake up and
690 	 * handle its death signal.
691 	 */
692 	mask = TASK_INTERRUPTIBLE;
693 	if (resume)
694 		mask |= TASK_WAKEKILL;
695 	if (!wake_up_state(t, mask))
696 		kick_process(t);
697 }
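/*
 * Example: signal_wake_up(t, 1), as used for SIGKILL in
 * complete_signal() below, adds TASK_WAKEKILL to the wakeup mask so
 * that stopped, traced and killable sleepers are woken as well;
 * signal_wake_up(t, 0) only disturbs TASK_INTERRUPTIBLE sleeps.
 */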
698 
699 /*
700  * Remove signals in mask from the pending set and queue.
701  * Returns 1 if any signals were found.
702  *
703  * All callers must be holding the siglock.
704  *
705  * This version takes a sigset mask and looks at all signals,
706  * not just those in the first mask word.
707  */
708 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
709 {
710 	struct sigqueue *q, *n;
711 	sigset_t m;
712 
713 	sigandsets(&m, mask, &s->signal);
714 	if (sigisemptyset(&m))
715 		return 0;
716 
717 	sigandnsets(&s->signal, &s->signal, mask);
718 	list_for_each_entry_safe(q, n, &s->list, list) {
719 		if (sigismember(mask, q->info.si_signo)) {
720 			list_del_init(&q->list);
721 			__sigqueue_free(q);
722 		}
723 	}
724 	return 1;
725 }
726 /*
727  * Remove signals in mask from the pending set and queue.
728  * Returns 1 if any signals were found.
729  *
730  * All callers must be holding the siglock.
731  */
732 static int rm_from_queue(unsigned long mask, struct sigpending *s)
733 {
734 	struct sigqueue *q, *n;
735 
736 	if (!sigtestsetmask(&s->signal, mask))
737 		return 0;
738 
739 	sigdelsetmask(&s->signal, mask);
740 	list_for_each_entry_safe(q, n, &s->list, list) {
741 		if (q->info.si_signo < SIGRTMIN &&
742 		    (mask & sigmask(q->info.si_signo))) {
743 			list_del_init(&q->list);
744 			__sigqueue_free(q);
745 		}
746 	}
747 	return 1;
748 }
749 
750 static inline int is_si_special(const struct siginfo *info)
751 {
752 	return info <= SEND_SIG_FORCED;
753 }
754 
755 static inline bool si_fromuser(const struct siginfo *info)
756 {
757 	return info == SEND_SIG_NOINFO ||
758 		(!is_si_special(info) && SI_FROMUSER(info));
759 }
760 
761 /*
762  * called with RCU read lock from check_kill_permission()
763  */
764 static int kill_ok_by_cred(struct task_struct *t)
765 {
766 	const struct cred *cred = current_cred();
767 	const struct cred *tcred = __task_cred(t);
768 
769 	if (cred->user->user_ns == tcred->user->user_ns &&
770 	    (cred->euid == tcred->suid ||
771 	     cred->euid == tcred->uid ||
772 	     cred->uid  == tcred->suid ||
773 	     cred->uid  == tcred->uid))
774 		return 1;
775 
776 	if (ns_capable(tcred->user->user_ns, CAP_KILL))
777 		return 1;
778 
779 	return 0;
780 }
781 
782 /*
783  * Bad permissions for sending the signal
784  * - the caller must hold the RCU read lock
785  */
786 static int check_kill_permission(int sig, struct siginfo *info,
787 				 struct task_struct *t)
788 {
789 	struct pid *sid;
790 	int error;
791 
792 	if (!valid_signal(sig))
793 		return -EINVAL;
794 
795 	if (!si_fromuser(info))
796 		return 0;
797 
798 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
799 	if (error)
800 		return error;
801 
802 	if (!same_thread_group(current, t) &&
803 	    !kill_ok_by_cred(t)) {
804 		switch (sig) {
805 		case SIGCONT:
806 			sid = task_session(t);
807 			/*
808 			 * We don't return the error if sid == NULL; the
809 			 * task was unhashed and the caller must notice this.
810 			 */
811 			if (!sid || sid == task_session(current))
812 				break;
813 		default:
814 			return -EPERM;
815 		}
816 	}
817 
818 	return security_task_kill(t, info, sig, 0);
819 }
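/*
 * Example of the SIGCONT exception above: "kill -CONT" from a shell at
 * a job that has since changed credentials can fail every euid/uid
 * comparison in kill_ok_by_cred(), yet still succeeds as long as
 * sender and target share a session; any other signal would get
 * -EPERM.
 */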
820 
821 /**
822  * ptrace_trap_notify - schedule trap to notify ptracer
823  * @t: tracee wanting to notify tracer
824  *
825  * This function schedules a sticky ptrace trap which is cleared on the
826  * next TRAP_STOP to notify the ptracer of an event.  @t must have been
827  * seized by its ptracer.
828  *
829  * If @t is running, STOP trap will be taken.  If trapped for STOP and
830  * ptracer is listening for events, tracee is woken up so that it can
831  * re-trap for the new event.  If trapped otherwise, STOP trap will be
832  * eventually taken without returning to userland after the existing traps
833  * are finished by PTRACE_CONT.
834  *
835  * CONTEXT:
836  * Must be called with @t->sighand->siglock held.
837  */
838 static void ptrace_trap_notify(struct task_struct *t)
839 {
840 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
841 	assert_spin_locked(&t->sighand->siglock);
842 
843 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
844 	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
845 }
846 
847 /*
848  * Handle magic process-wide effects of stop/continue signals. Unlike
849  * the signal actions, these happen immediately at signal-generation
850  * time regardless of blocking, ignoring, or handling.  This does the
851  * actual continuing for SIGCONT, but not the actual stopping for stop
852  * signals. The process stop is done as a signal action for SIG_DFL.
853  *
854  * Returns true if the signal should actually be delivered; otherwise
855  * it should be dropped.
856  */
857 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
858 {
859 	struct signal_struct *signal = p->signal;
860 	struct task_struct *t;
861 
862 	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
863 		/*
864 		 * The process is in the middle of dying, nothing to do.
865 		 */
866 	} else if (sig_kernel_stop(sig)) {
867 		/*
868 		 * This is a stop signal.  Remove SIGCONT from all queues.
869 		 */
870 		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
871 		t = p;
872 		do {
873 			rm_from_queue(sigmask(SIGCONT), &t->pending);
874 		} while_each_thread(p, t);
875 	} else if (sig == SIGCONT) {
876 		unsigned int why;
877 		/*
878 		 * Remove all stop signals from all queues, wake all threads.
879 		 */
880 		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
881 		t = p;
882 		do {
883 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
884 			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
885 			if (likely(!(t->ptrace & PT_SEIZED)))
886 				wake_up_state(t, __TASK_STOPPED);
887 			else
888 				ptrace_trap_notify(t);
889 		} while_each_thread(p, t);
890 
891 		/*
892 		 * Notify the parent with CLD_CONTINUED if we were stopped.
893 		 *
894 		 * If we were in the middle of a group stop, we pretend it
895 		 * was already finished, and then continued. Since SIGCHLD
896 		 * doesn't queue we report only CLD_STOPPED, as if the next
897 		 * CLD_CONTINUED was dropped.
898 		 */
899 		why = 0;
900 		if (signal->flags & SIGNAL_STOP_STOPPED)
901 			why |= SIGNAL_CLD_CONTINUED;
902 		else if (signal->group_stop_count)
903 			why |= SIGNAL_CLD_STOPPED;
904 
905 		if (why) {
906 			/*
907 			 * The first thread which returns from do_signal_stop()
908 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
909 			 * notify its parent. See get_signal_to_deliver().
910 			 */
911 			signal->flags = why | SIGNAL_STOP_CONTINUED;
912 			signal->group_stop_count = 0;
913 			signal->group_exit_code = 0;
914 		}
915 	}
916 
917 	return !sig_ignored(p, sig, from_ancestor_ns);
918 }
919 
920 /*
921  * Test if P wants to take SIG.  After we've checked all threads with this,
922  * it's equivalent to finding no threads not blocking SIG.  Any threads not
923  * blocking SIG were ruled out because they are not running and already
924  * have pending signals.  Such threads will dequeue from the shared queue
925  * as soon as they're available, so putting the signal on the shared queue
926  * will be equivalent to sending it to one such thread.
927  */
928 static inline int wants_signal(int sig, struct task_struct *p)
929 {
930 	if (sigismember(&p->blocked, sig))
931 		return 0;
932 	if (p->flags & PF_EXITING)
933 		return 0;
934 	if (sig == SIGKILL)
935 		return 1;
936 	if (task_is_stopped_or_traced(p))
937 		return 0;
938 	return task_curr(p) || !signal_pending(p);
939 }
940 
941 static void complete_signal(int sig, struct task_struct *p, int group)
942 {
943 	struct signal_struct *signal = p->signal;
944 	struct task_struct *t;
945 
946 	/*
947 	 * Now find a thread we can wake up to take the signal off the queue.
948 	 *
949 	 * If the main thread wants the signal, it gets first crack.
950 	 * Probably the least surprising to the average bear.
951 	 */
952 	if (wants_signal(sig, p))
953 		t = p;
954 	else if (!group || thread_group_empty(p))
955 		/*
956 		 * There is just one thread and it does not need to be woken.
957 		 * It will dequeue unblocked signals before it runs again.
958 		 */
959 		return;
960 	else {
961 		/*
962 		 * Otherwise try to find a suitable thread.
963 		 */
964 		t = signal->curr_target;
965 		while (!wants_signal(sig, t)) {
966 			t = next_thread(t);
967 			if (t == signal->curr_target)
968 				/*
969 				 * No thread needs to be woken.
970 				 * Any eligible threads will see
971 				 * the signal in the queue soon.
972 				 */
973 				return;
974 		}
975 		signal->curr_target = t;
976 	}
977 
978 	/*
979 	 * Found a killable thread.  If the signal will be fatal,
980 	 * then start taking the whole group down immediately.
981 	 */
982 	if (sig_fatal(p, sig) &&
983 	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
984 	    !sigismember(&t->real_blocked, sig) &&
985 	    (sig == SIGKILL || !t->ptrace)) {
986 		/*
987 		 * This signal will be fatal to the whole group.
988 		 */
989 		if (!sig_kernel_coredump(sig)) {
990 			/*
991 			 * Start a group exit and wake everybody up.
992 			 * This way we don't have other threads
993 			 * running and doing things after a slower
994 			 * thread has the fatal signal pending.
995 			 */
996 			signal->flags = SIGNAL_GROUP_EXIT;
997 			signal->group_exit_code = sig;
998 			signal->group_stop_count = 0;
999 			t = p;
1000 			do {
1001 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1002 				sigaddset(&t->pending.signal, SIGKILL);
1003 				signal_wake_up(t, 1);
1004 			} while_each_thread(p, t);
1005 			return;
1006 		}
1007 	}
1008 
1009 	/*
1010 	 * The signal is already in the shared-pending queue.
1011 	 * Tell the chosen thread to wake up and dequeue it.
1012 	 */
1013 	signal_wake_up(t, sig == SIGKILL);
1014 	return;
1015 }
1016 
1017 static inline int legacy_queue(struct sigpending *signals, int sig)
1018 {
1019 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1020 }
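/*
 * Example: two back-to-back kill(pid, SIGHUP) calls leave a single
 * pending SIGHUP (legacy, non-real-time coalescing), whereas two
 * sigqueue() sends of a signal >= SIGRTMIN queue two separate entries,
 * subject to RLIMIT_SIGPENDING.
 */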
1021 
1022 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1023 			int group, int from_ancestor_ns)
1024 {
1025 	struct sigpending *pending;
1026 	struct sigqueue *q;
1027 	int override_rlimit;
1028 
1029 	trace_signal_generate(sig, info, t);
1030 
1031 	assert_spin_locked(&t->sighand->siglock);
1032 
1033 	if (!prepare_signal(sig, t, from_ancestor_ns))
1034 		return 0;
1035 
1036 	pending = group ? &t->signal->shared_pending : &t->pending;
1037 	/*
1038 	 * Short-circuit ignored signals and support queuing
1039 	 * exactly one non-rt signal, so that we can get more
1040 	 * detailed information about the cause of the signal.
1041 	 */
1042 	if (legacy_queue(pending, sig))
1043 		return 0;
1044 	/*
1045 	 * fast-pathed signals for kernel-internal things like SIGSTOP
1046 	 * or SIGKILL.
1047 	 */
1048 	if (info == SEND_SIG_FORCED)
1049 		goto out_set;
1050 
1051 	/*
1052 	 * Real-time signals must be queued if sent by sigqueue, or
1053 	 * some other real-time mechanism.  It is implementation
1054 	 * defined whether kill() does so.  We attempt to do so, on
1055 	 * the principle of least surprise, but since kill is not
1056 	 * allowed to fail with EAGAIN when low on memory we just
1057 	 * make sure at least one signal gets delivered and don't
1058 	 * pass on the info struct.
1059 	 */
1060 	if (sig < SIGRTMIN)
1061 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1062 	else
1063 		override_rlimit = 0;
1064 
1065 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1066 		override_rlimit);
1067 	if (q) {
1068 		list_add_tail(&q->list, &pending->list);
1069 		switch ((unsigned long) info) {
1070 		case (unsigned long) SEND_SIG_NOINFO:
1071 			q->info.si_signo = sig;
1072 			q->info.si_errno = 0;
1073 			q->info.si_code = SI_USER;
1074 			q->info.si_pid = task_tgid_nr_ns(current,
1075 							task_active_pid_ns(t));
1076 			q->info.si_uid = current_uid();
1077 			break;
1078 		case (unsigned long) SEND_SIG_PRIV:
1079 			q->info.si_signo = sig;
1080 			q->info.si_errno = 0;
1081 			q->info.si_code = SI_KERNEL;
1082 			q->info.si_pid = 0;
1083 			q->info.si_uid = 0;
1084 			break;
1085 		default:
1086 			copy_siginfo(&q->info, info);
1087 			if (from_ancestor_ns)
1088 				q->info.si_pid = 0;
1089 			break;
1090 		}
1091 	} else if (!is_si_special(info)) {
1092 		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1093 			/*
1094 			 * Queue overflow, abort.  We abort only when the
1095 			 * signal was rt and sent by user using something
1096 			 * other than kill().
1097 			 */
1098 			trace_signal_overflow_fail(sig, group, info);
1099 			return -EAGAIN;
1100 		} else {
1101 			/*
1102 			 * This is a silent loss of information.  We still
1103 			 * send the signal, but the *info bits are lost.
1104 			 */
1105 			trace_signal_lose_info(sig, group, info);
1106 		}
1107 	}
1108 
1109 out_set:
1110 	signalfd_notify(t, sig);
1111 	sigaddset(&pending->signal, sig);
1112 	complete_signal(sig, t, group);
1113 	return 0;
1114 }
1115 
1116 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1117 			int group)
1118 {
1119 	int from_ancestor_ns = 0;
1120 
1121 #ifdef CONFIG_PID_NS
1122 	from_ancestor_ns = si_fromuser(info) &&
1123 			   !task_pid_nr_ns(current, task_active_pid_ns(t));
1124 #endif
1125 
1126 	return __send_signal(sig, info, t, group, from_ancestor_ns);
1127 }
1128 
1129 static void print_fatal_signal(struct pt_regs *regs, int signr)
1130 {
1131 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
1132 		current->comm, task_pid_nr(current), signr);
1133 
1134 #if defined(__i386__) && !defined(__arch_um__)
1135 	printk("code at %08lx: ", regs->ip);
1136 	{
1137 		int i;
1138 		for (i = 0; i < 16; i++) {
1139 			unsigned char insn;
1140 
1141 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1142 				break;
1143 			printk("%02x ", insn);
1144 		}
1145 	}
1146 #endif
1147 	printk("\n");
1148 	preempt_disable();
1149 	show_regs(regs);
1150 	preempt_enable();
1151 }
1152 
1153 static int __init setup_print_fatal_signals(char *str)
1154 {
1155 	get_option (&str, &print_fatal_signals);
1156 
1157 	return 1;
1158 }
1159 
1160 __setup("print-fatal-signals=", setup_print_fatal_signals);
1161 
1162 int
1163 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1164 {
1165 	return send_signal(sig, info, p, 1);
1166 }
1167 
1168 static int
1169 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1170 {
1171 	return send_signal(sig, info, t, 0);
1172 }
1173 
1174 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1175 			bool group)
1176 {
1177 	unsigned long flags;
1178 	int ret = -ESRCH;
1179 
1180 	if (lock_task_sighand(p, &flags)) {
1181 		ret = send_signal(sig, info, p, group);
1182 		unlock_task_sighand(p, &flags);
1183 	}
1184 
1185 	return ret;
1186 }
1187 
1188 /*
1189  * Force a signal that the process can't ignore: if necessary
1190  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1191  *
1192  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1193  * since we do not want to have a signal handler that was blocked
1194  * be invoked when user space had explicitly blocked it.
1195  *
1196  * We don't want recursive SIGSEGVs and the like, for example;
1197  * that is also why we clear SIGNAL_UNKILLABLE.
1198  */
1199 int
1200 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1201 {
1202 	unsigned long int flags;
1203 	int ret, blocked, ignored;
1204 	struct k_sigaction *action;
1205 
1206 	spin_lock_irqsave(&t->sighand->siglock, flags);
1207 	action = &t->sighand->action[sig-1];
1208 	ignored = action->sa.sa_handler == SIG_IGN;
1209 	blocked = sigismember(&t->blocked, sig);
1210 	if (blocked || ignored) {
1211 		action->sa.sa_handler = SIG_DFL;
1212 		if (blocked) {
1213 			sigdelset(&t->blocked, sig);
1214 			recalc_sigpending_and_wake(t);
1215 		}
1216 	}
1217 	if (action->sa.sa_handler == SIG_DFL)
1218 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1219 	ret = specific_send_sig_info(sig, info, t);
1220 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1221 
1222 	return ret;
1223 }
1224 
1225 /*
1226  * Nuke all other threads in the group.
1227  */
1228 int zap_other_threads(struct task_struct *p)
1229 {
1230 	struct task_struct *t = p;
1231 	int count = 0;
1232 
1233 	p->signal->group_stop_count = 0;
1234 
1235 	while_each_thread(p, t) {
1236 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1237 		count++;
1238 
1239 		/* Don't bother with already dead threads */
1240 		if (t->exit_state)
1241 			continue;
1242 		sigaddset(&t->pending.signal, SIGKILL);
1243 		signal_wake_up(t, 1);
1244 	}
1245 
1246 	return count;
1247 }
1248 
1249 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1250 					   unsigned long *flags)
1251 {
1252 	struct sighand_struct *sighand;
1253 
1254 	for (;;) {
1255 		local_irq_save(*flags);
1256 		rcu_read_lock();
1257 		sighand = rcu_dereference(tsk->sighand);
1258 		if (unlikely(sighand == NULL)) {
1259 			rcu_read_unlock();
1260 			local_irq_restore(*flags);
1261 			break;
1262 		}
1263 
1264 		spin_lock(&sighand->siglock);
1265 		if (likely(sighand == tsk->sighand)) {
1266 			rcu_read_unlock();
1267 			break;
1268 		}
1269 		spin_unlock(&sighand->siglock);
1270 		rcu_read_unlock();
1271 		local_irq_restore(*flags);
1272 	}
1273 
1274 	return sighand;
1275 }
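/*
 * The lock-and-recheck loop above exists because sighand_struct is
 * allocated with SLAB_DESTROY_BY_RCU: under rcu_read_lock() the
 * pointer stays safe to dereference, but the task may exit or exec and
 * the object be recycled, so ->sighand is re-read after taking siglock
 * and the loop retries on a mismatch.
 */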
1276 
1277 /*
1278  * send signal info to all the members of a group
1279  */
1280 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1281 {
1282 	int ret;
1283 
1284 	rcu_read_lock();
1285 	ret = check_kill_permission(sig, info, p);
1286 	rcu_read_unlock();
1287 
1288 	if (!ret && sig)
1289 		ret = do_send_sig_info(sig, info, p, true);
1290 
1291 	return ret;
1292 }
1293 
1294 /*
1295  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1296  * control characters do (^C, ^Z etc)
1297  * - the caller must hold at least a readlock on tasklist_lock
1298  */
1299 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1300 {
1301 	struct task_struct *p = NULL;
1302 	int retval, success;
1303 
1304 	success = 0;
1305 	retval = -ESRCH;
1306 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1307 		int err = group_send_sig_info(sig, info, p);
1308 		success |= !err;
1309 		retval = err;
1310 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1311 	return success ? 0 : retval;
1312 }
1313 
1314 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1315 {
1316 	int error = -ESRCH;
1317 	struct task_struct *p;
1318 
1319 	rcu_read_lock();
1320 retry:
1321 	p = pid_task(pid, PIDTYPE_PID);
1322 	if (p) {
1323 		error = group_send_sig_info(sig, info, p);
1324 		if (unlikely(error == -ESRCH))
1325 			/*
1326 			 * The task was unhashed in between, try again.
1327 			 * If it is dead, pid_task() will return NULL;
1328 			 * if we race with de_thread() it will find the
1329 			 * new leader.
1330 			 */
1331 			goto retry;
1332 	}
1333 	rcu_read_unlock();
1334 
1335 	return error;
1336 }
1337 
1338 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1339 {
1340 	int error;
1341 	rcu_read_lock();
1342 	error = kill_pid_info(sig, info, find_vpid(pid));
1343 	rcu_read_unlock();
1344 	return error;
1345 }
1346 
1347 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1348 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1349 		      uid_t uid, uid_t euid, u32 secid)
1350 {
1351 	int ret = -EINVAL;
1352 	struct task_struct *p;
1353 	const struct cred *pcred;
1354 	unsigned long flags;
1355 
1356 	if (!valid_signal(sig))
1357 		return ret;
1358 
1359 	rcu_read_lock();
1360 	p = pid_task(pid, PIDTYPE_PID);
1361 	if (!p) {
1362 		ret = -ESRCH;
1363 		goto out_unlock;
1364 	}
1365 	pcred = __task_cred(p);
1366 	if (si_fromuser(info) &&
1367 	    euid != pcred->suid && euid != pcred->uid &&
1368 	    uid  != pcred->suid && uid  != pcred->uid) {
1369 		ret = -EPERM;
1370 		goto out_unlock;
1371 	}
1372 	ret = security_task_kill(p, info, sig, secid);
1373 	if (ret)
1374 		goto out_unlock;
1375 
1376 	if (sig) {
1377 		if (lock_task_sighand(p, &flags)) {
1378 			ret = __send_signal(sig, info, p, 1, 0);
1379 			unlock_task_sighand(p, &flags);
1380 		} else
1381 			ret = -ESRCH;
1382 	}
1383 out_unlock:
1384 	rcu_read_unlock();
1385 	return ret;
1386 }
1387 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1388 
1389 /*
1390  * kill_something_info() interprets pid in interesting ways just like kill(2).
1391  *
1392  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1393  * is probably wrong.  Should make it like BSD or SYSV.
1394  */
1395 
1396 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1397 {
1398 	int ret;
1399 
1400 	if (pid > 0) {
1401 		rcu_read_lock();
1402 		ret = kill_pid_info(sig, info, find_vpid(pid));
1403 		rcu_read_unlock();
1404 		return ret;
1405 	}
1406 
1407 	read_lock(&tasklist_lock);
1408 	if (pid != -1) {
1409 		ret = __kill_pgrp_info(sig, info,
1410 				pid ? find_vpid(-pid) : task_pgrp(current));
1411 	} else {
1412 		int retval = 0, count = 0;
1413 		struct task_struct * p;
1414 
1415 		for_each_process(p) {
1416 			if (task_pid_vnr(p) > 1 &&
1417 					!same_thread_group(p, current)) {
1418 				int err = group_send_sig_info(sig, info, p);
1419 				++count;
1420 				if (err != -EPERM)
1421 					retval = err;
1422 			}
1423 		}
1424 		ret = count ? retval : -ESRCH;
1425 	}
1426 	read_unlock(&tasklist_lock);
1427 
1428 	return ret;
1429 }
1430 
1431 /*
1432  * These are for backward compatibility with the rest of the kernel source.
1433  */
1434 
1435 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1436 {
1437 	/*
1438 	 * Make sure legacy kernel users don't send in bad values
1439 	 * (normal paths check this in check_kill_permission).
1440 	 */
1441 	if (!valid_signal(sig))
1442 		return -EINVAL;
1443 
1444 	return do_send_sig_info(sig, info, p, false);
1445 }
1446 
1447 #define __si_special(priv) \
1448 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1449 
1450 int
1451 send_sig(int sig, struct task_struct *p, int priv)
1452 {
1453 	return send_sig_info(sig, __si_special(priv), p);
1454 }
1455 
1456 void
1457 force_sig(int sig, struct task_struct *p)
1458 {
1459 	force_sig_info(sig, SEND_SIG_PRIV, p);
1460 }
1461 
1462 /*
1463  * When things go south during signal handling, we
1464  * will force a SIGSEGV. And if the signal that caused
1465  * the problem was already a SIGSEGV, we'll want to
1466  * make sure we don't even try to deliver the signal.
1467  */
1468 int
1469 force_sigsegv(int sig, struct task_struct *p)
1470 {
1471 	if (sig == SIGSEGV) {
1472 		unsigned long flags;
1473 		spin_lock_irqsave(&p->sighand->siglock, flags);
1474 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1475 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1476 	}
1477 	force_sig(SIGSEGV, p);
1478 	return 0;
1479 }
1480 
1481 int kill_pgrp(struct pid *pid, int sig, int priv)
1482 {
1483 	int ret;
1484 
1485 	read_lock(&tasklist_lock);
1486 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1487 	read_unlock(&tasklist_lock);
1488 
1489 	return ret;
1490 }
1491 EXPORT_SYMBOL(kill_pgrp);
1492 
1493 int kill_pid(struct pid *pid, int sig, int priv)
1494 {
1495 	return kill_pid_info(sig, __si_special(priv), pid);
1496 }
1497 EXPORT_SYMBOL(kill_pid);
1498 
1499 /*
1500  * These functions support sending signals using preallocated sigqueue
1501  * structures.  This is needed "because realtime applications cannot
1502  * afford to lose notifications of asynchronous events, like timer
1503  * expirations or I/O completions".  In the case of POSIX Timers
1504  * expirations or I/O completions".  In the case of POSIX timers
1505  * we allocate the sigqueue structure at timer_create() time.  If this
1506  * with an EAGAIN error.
1507  */
1508 struct sigqueue *sigqueue_alloc(void)
1509 {
1510 	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1511 
1512 	if (q)
1513 		q->flags |= SIGQUEUE_PREALLOC;
1514 
1515 	return q;
1516 }
1517 
1518 void sigqueue_free(struct sigqueue *q)
1519 {
1520 	unsigned long flags;
1521 	spinlock_t *lock = &current->sighand->siglock;
1522 
1523 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1524 	/*
1525 	 * We must hold ->siglock while testing q->list
1526 	 * to serialize with collect_signal() or with
1527 	 * __exit_signal()->flush_sigqueue().
1528 	 */
1529 	spin_lock_irqsave(lock, flags);
1530 	q->flags &= ~SIGQUEUE_PREALLOC;
1531 	/*
1532 	 * If it is queued it will be freed when dequeued,
1533 	 * like the "regular" sigqueue.
1534 	 */
1535 	if (!list_empty(&q->list))
1536 		q = NULL;
1537 	spin_unlock_irqrestore(lock, flags);
1538 
1539 	if (q)
1540 		__sigqueue_free(q);
1541 }
1542 
1543 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1544 {
1545 	int sig = q->info.si_signo;
1546 	struct sigpending *pending;
1547 	unsigned long flags;
1548 	int ret;
1549 
1550 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1551 
1552 	ret = -1;
1553 	if (!likely(lock_task_sighand(t, &flags)))
1554 		goto ret;
1555 
1556 	ret = 1; /* the signal is ignored */
1557 	if (!prepare_signal(sig, t, 0))
1558 		goto out;
1559 
1560 	ret = 0;
1561 	if (unlikely(!list_empty(&q->list))) {
1562 		/*
1563 		 * If an SI_TIMER entry is already queued, just increment
1564 		 * the overrun count.
1565 		 */
1566 		BUG_ON(q->info.si_code != SI_TIMER);
1567 		q->info.si_overrun++;
1568 		goto out;
1569 	}
1570 	q->info.si_overrun = 0;
1571 
1572 	signalfd_notify(t, sig);
1573 	pending = group ? &t->signal->shared_pending : &t->pending;
1574 	list_add_tail(&q->list, &pending->list);
1575 	sigaddset(&pending->signal, sig);
1576 	complete_signal(sig, t, group);
1577 out:
1578 	unlock_task_sighand(t, &flags);
1579 ret:
1580 	return ret;
1581 }
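/*
 * Return convention above: 0 means the signal was queued, 1 means
 * prepare_signal() decided it is ignored, and -1 means the target is
 * exiting and its sighand is already gone; callers such as the
 * posix-timers code can thus tell a dropped signal from a dead target.
 */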
1582 
1583 /*
1584  * Let a parent know about the death of a child.
1585  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1586  *
1587  * Returns true if our parent ignored us and so we've switched to
1588  * self-reaping.
1589  */
1590 bool do_notify_parent(struct task_struct *tsk, int sig)
1591 {
1592 	struct siginfo info;
1593 	unsigned long flags;
1594 	struct sighand_struct *psig;
1595 	bool autoreap = false;
1596 
1597 	BUG_ON(sig == -1);
1598 
1599 	/* do_notify_parent_cldstop should have been called instead.  */
1600 	BUG_ON(task_is_stopped_or_traced(tsk));
1601 
1602 	BUG_ON(!tsk->ptrace &&
1603 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1604 
1605 	info.si_signo = sig;
1606 	info.si_errno = 0;
1607 	/*
1608 	 * We are under tasklist_lock here so our parent is tied to
1609 	 * us and cannot exit and release its namespace.
1610 	 *
1611 	 * The only thing it can do is switch its nsproxy with sys_unshare,
1612 	 * but unsharing pid namespaces is not allowed, so we'll always
1613 	 * see the relevant namespace.
1614 	 *
1615 	 * write_lock() currently calls preempt_disable() which is the
1616 	 * same as rcu_read_lock(), but according to Oleg it is not
1617 	 * correct to rely on this.
1618 	 */
1619 	rcu_read_lock();
1620 	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1621 	info.si_uid = __task_cred(tsk)->uid;
1622 	rcu_read_unlock();
1623 
1624 	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1625 				tsk->signal->utime));
1626 	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1627 				tsk->signal->stime));
1628 
1629 	info.si_status = tsk->exit_code & 0x7f;
1630 	if (tsk->exit_code & 0x80)
1631 		info.si_code = CLD_DUMPED;
1632 	else if (tsk->exit_code & 0x7f)
1633 		info.si_code = CLD_KILLED;
1634 	else {
1635 		info.si_code = CLD_EXITED;
1636 		info.si_status = tsk->exit_code >> 8;
1637 	}
1638 
1639 	psig = tsk->parent->sighand;
1640 	spin_lock_irqsave(&psig->siglock, flags);
1641 	if (!tsk->ptrace && sig == SIGCHLD &&
1642 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1643 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1644 		/*
1645 		 * We are exiting and our parent doesn't care.  POSIX.1
1646 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1647 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1648 		 * automatically and not left for our parent's wait4 call.
1649 		 * Rather than having the parent do it as a magic kind of
1650 		 * signal handler, we just set this to tell do_exit that we
1651 		 * can be cleaned up without becoming a zombie.  Note that
1652 		 * we still call __wake_up_parent in this case, because a
1653 		 * blocked sys_wait4 might now return -ECHILD.
1654 		 *
1655 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1656 		 * is implementation-defined: we do (if you don't want
1657 		 * it, just use SIG_IGN instead).
1658 		 */
1659 		autoreap = true;
1660 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1661 			sig = 0;
1662 	}
1663 	if (valid_signal(sig) && sig)
1664 		__group_send_sig_info(sig, &info, tsk->parent);
1665 	__wake_up_parent(tsk, tsk->parent);
1666 	spin_unlock_irqrestore(&psig->siglock, flags);
1667 
1668 	return autoreap;
1669 }
1670 
1671 /**
1672  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1673  * @tsk: task reporting the state change
1674  * @for_ptracer: the notification is for ptracer
1675  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1676  *
1677  * Notify @tsk's parent that the stopped/continued state has changed.  If
1678  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
1679  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1680  *
1681  * CONTEXT:
1682  * Must be called with tasklist_lock at least read locked.
1683  */
1684 static void do_notify_parent_cldstop(struct task_struct *tsk,
1685 				     bool for_ptracer, int why)
1686 {
1687 	struct siginfo info;
1688 	unsigned long flags;
1689 	struct task_struct *parent;
1690 	struct sighand_struct *sighand;
1691 
1692 	if (for_ptracer) {
1693 		parent = tsk->parent;
1694 	} else {
1695 		tsk = tsk->group_leader;
1696 		parent = tsk->real_parent;
1697 	}
1698 
1699 	info.si_signo = SIGCHLD;
1700 	info.si_errno = 0;
1701 	/*
1702 	 * see comment in do_notify_parent() about the following 4 lines
1703 	 */
1704 	rcu_read_lock();
1705 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1706 	info.si_uid = __task_cred(tsk)->uid;
1707 	rcu_read_unlock();
1708 
1709 	info.si_utime = cputime_to_clock_t(tsk->utime);
1710 	info.si_stime = cputime_to_clock_t(tsk->stime);
1711 
1712 	info.si_code = why;
1713 	switch (why) {
1714 	case CLD_CONTINUED:
1715 		info.si_status = SIGCONT;
1716 		break;
1717 	case CLD_STOPPED:
1718 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1719 		break;
1720 	case CLD_TRAPPED:
1721 		info.si_status = tsk->exit_code & 0x7f;
1722 		break;
1723 	default:
1724 		BUG();
1725 	}
1726 
1727 	sighand = parent->sighand;
1728 	spin_lock_irqsave(&sighand->siglock, flags);
1729 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1730 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1731 		__group_send_sig_info(SIGCHLD, &info, parent);
1732 	/*
1733 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1734 	 */
1735 	__wake_up_parent(tsk, parent);
1736 	spin_unlock_irqrestore(&sighand->siglock, flags);
1737 }
1738 
1739 static inline int may_ptrace_stop(void)
1740 {
1741 	if (!likely(current->ptrace))
1742 		return 0;
1743 	/*
1744 	 * Are we in the middle of do_coredump?
1745 	 * If so and our tracer is also part of the coredump, stopping
1746 	 * is a deadlock situation and pointless because our tracer
1747 	 * is dead, so don't allow us to stop.
1748 	 * If SIGKILL was already sent before the caller unlocked
1749 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1750 	 * is safe to enter schedule().
1751 	 */
1752 	if (unlikely(current->mm->core_state) &&
1753 	    unlikely(current->mm == current->parent->mm))
1754 		return 0;
1755 
1756 	return 1;
1757 }
1758 
1759 /*
1760  * Return non-zero if there is a SIGKILL that should be waking us up.
1761  * Called with the siglock held.
1762  */
1763 static int sigkill_pending(struct task_struct *tsk)
1764 {
1765 	return	sigismember(&tsk->pending.signal, SIGKILL) ||
1766 		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1767 }
1768 
1769 /*
1770  * This must be called with current->sighand->siglock held.
1771  *
1772  * This should be the path for all ptrace stops.
1773  * We always set current->last_siginfo while stopped here.
1774  * That makes it a way to test a stopped process for
1775  * being ptrace-stopped vs being job-control-stopped.
1776  *
1777  * If we actually decide not to stop at all because the tracer
1778  * is gone, we keep current->exit_code unless clear_code.
1779  */
1780 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1781 	__releases(&current->sighand->siglock)
1782 	__acquires(&current->sighand->siglock)
1783 {
1784 	bool gstop_done = false;
1785 
1786 	if (arch_ptrace_stop_needed(exit_code, info)) {
1787 		/*
1788 		 * The arch code has something special to do before a
1789 		 * ptrace stop.  This is allowed to block, e.g. for faults
1790 		 * on user stack pages.  We can't keep the siglock while
1791 		 * calling arch_ptrace_stop, so we must release it now.
1792 		 * To preserve proper semantics, we must do this before
1793 		 * any signal bookkeeping like checking group_stop_count.
1794 		 * Meanwhile, a SIGKILL could come in before we retake the
1795 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
1796 		 * So after regaining the lock, we must check for SIGKILL.
1797 		 */
1798 		spin_unlock_irq(&current->sighand->siglock);
1799 		arch_ptrace_stop(exit_code, info);
1800 		spin_lock_irq(&current->sighand->siglock);
1801 		if (sigkill_pending(current))
1802 			return;
1803 	}
1804 
1805 	/*
1806 	 * We're committing to trapping.  TRACED should be visible before
1807 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1808 	 * Also, transition to TRACED and updates to ->jobctl should be
1809 	 * atomic with respect to siglock and should be done after the arch
1810 	 * hook as siglock is released and regrabbed across it.
1811 	 */
1812 	set_current_state(TASK_TRACED);
1813 
1814 	current->last_siginfo = info;
1815 	current->exit_code = exit_code;
1816 
1817 	/*
1818 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
1819 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1820 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
1821 	 * could be clear now.  We act as if SIGCONT is received after
1822 	 * TASK_TRACED is entered - ignore it.
1823 	 */
1824 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1825 		gstop_done = task_participate_group_stop(current);
1826 
1827 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1828 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1829 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1830 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1831 
1832 	/* entering a trap, clear TRAPPING */
1833 	task_clear_jobctl_trapping(current);
1834 
1835 	spin_unlock_irq(&current->sighand->siglock);
1836 	read_lock(&tasklist_lock);
1837 	if (may_ptrace_stop()) {
1838 		/*
1839 		 * Notify parents of the stop.
1840 		 *
1841 		 * While ptraced, there are two parents - the ptracer and
1842 		 * the real_parent of the group_leader.  The ptracer should
1843 		 * know about every stop while the real parent is only
1844 		 * interested in the completion of group stop.  The states
1845 		 * for the two don't interact with each other.  Notify
1846 		 * separately unless they're gonna be duplicates.
1847 		 */
1848 		do_notify_parent_cldstop(current, true, why);
1849 		if (gstop_done && ptrace_reparented(current))
1850 			do_notify_parent_cldstop(current, false, why);
1851 
1852 		/*
1853 		 * Don't want to allow preemption here, because
1854 		 * sys_ptrace() needs this task to be inactive.
1855 		 *
1856 		 * XXX: implement read_unlock_no_resched().
1857 		 */
1858 		preempt_disable();
1859 		read_unlock(&tasklist_lock);
1860 		preempt_enable_no_resched();
1861 		schedule();
1862 	} else {
1863 		/*
1864 		 * By the time we got the lock, our tracer went away.
1865 		 * Don't drop the lock yet, another tracer may come.
1866 		 *
1867 		 * If @gstop_done, the ptracer went away between group stop
1868 		 * completion and here.  During detach, it would have set
1869 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
1870 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
1871 		 * the real parent of the group stop completion is enough.
1872 		 */
1873 		if (gstop_done)
1874 			do_notify_parent_cldstop(current, false, why);
1875 
1876 		__set_current_state(TASK_RUNNING);
1877 		if (clear_code)
1878 			current->exit_code = 0;
1879 		read_unlock(&tasklist_lock);
1880 	}
1881 
1882 	/*
1883 	 * While in TASK_TRACED, we were considered "frozen enough".
1884 	 * Now that we woke up, it's crucial that, if we're supposed to
1885 	 * be frozen, we freeze now before running anything substantial.
1886 	 */
1887 	try_to_freeze();
1888 
1889 	/*
1890 	 * We are back.  Now reacquire the siglock before touching
1891 	 * last_siginfo, so that we are sure to have synchronized with
1892 	 * any signal-sending on another CPU that wants to examine it.
1893 	 */
1894 	spin_lock_irq(&current->sighand->siglock);
1895 	current->last_siginfo = NULL;
1896 
1897 	/* LISTENING can be set only during STOP traps, clear it */
1898 	current->jobctl &= ~JOBCTL_LISTENING;
1899 
1900 	/*
1901 	 * Queued signals ignored us while we were stopped for tracing.
1902 	 * So check for any that we should take before resuming user mode.
1903 	 * This sets TIF_SIGPENDING, but never clears it.
1904 	 */
1905 	recalc_sigpending_tsk(current);
1906 }
1907 
1908 static void ptrace_do_notify(int signr, int exit_code, int why)
1909 {
1910 	siginfo_t info;
1911 
1912 	memset(&info, 0, sizeof info);
1913 	info.si_signo = signr;
1914 	info.si_code = exit_code;
1915 	info.si_pid = task_pid_vnr(current);
1916 	info.si_uid = current_uid();
1917 
1918 	/* Let the debugger run.  */
1919 	ptrace_stop(exit_code, why, 1, &info);
1920 }
1921 
1922 void ptrace_notify(int exit_code)
1923 {
1924 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1925 
1926 	spin_lock_irq(&current->sighand->siglock);
1927 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1928 	spin_unlock_irq(&current->sighand->siglock);
1929 }
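
/*
 * Illustrative userspace sketch (not part of this file): how a tracer
 * observes the stop that ptrace_stop()/ptrace_notify() report.  The
 * tracee sits in TASK_TRACED until the tracer resumes it; the siginfo
 * passed to ptrace_stop() is what PTRACE_GETSIGINFO returns.  Error
 * handling omitted; "child" is a hypothetical traced pid.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void observe_stop(pid_t child)
{
	siginfo_t si;
	int status;

	waitpid(child, &status, 0);		/* tracee entered TASK_TRACED */
	if (WIFSTOPPED(status)) {
		ptrace(PTRACE_GETSIGINFO, child, 0, &si);
		/* si.si_signo matches WSTOPSIG(status) for delivery stops */
		ptrace(PTRACE_CONT, child, 0, WSTOPSIG(status));
	}
}
#endif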
1930 
1931 /**
1932  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1933  * @signr: signr causing group stop if initiating
1934  *
1935  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1936  * and participate in it.  If already set, participate in the existing
1937  * group stop.  If participated in a group stop (and thus slept), %true is
1938  * returned with siglock released.
1939  *
1940  * If ptraced, this function doesn't handle stop itself.  Instead,
1941  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1942  * untouched.  The caller must ensure that INTERRUPT trap handling takes
1943  * place afterwards.
1944  *
1945  * CONTEXT:
1946  * Must be called with @current->sighand->siglock held, which is released
1947  * on %true return.
1948  *
1949  * RETURNS:
1950  * %false if group stop is already cancelled or ptrace trap is scheduled.
1951  * %true if participated in group stop.
1952  */
1953 static bool do_signal_stop(int signr)
1954 	__releases(&current->sighand->siglock)
1955 {
1956 	struct signal_struct *sig = current->signal;
1957 
1958 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1959 		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1960 		struct task_struct *t;
1961 
1962 		/* signr will be recorded in task->jobctl for retries */
1963 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1964 
1965 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1966 		    unlikely(signal_group_exit(sig)))
1967 			return false;
1968 		/*
1969 		 * There is no group stop already in progress.  We must
1970 		 * initiate one now.
1971 		 *
1972 		 * While ptraced, a task may be resumed while group stop is
1973 		 * still in effect and then receive a stop signal and
1974 		 * initiate another group stop.  This deviates from the
1975 		 * usual behavior as two consecutive stop signals can't
1976 		 * cause two group stops when !ptraced.  That is why we
1977 		 * also check !task_is_stopped(t) below.
1978 		 *
1979 		 * The condition can be distinguished by testing whether
1980 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
1981 		 * group_exit_code in such case.
1982 		 *
1983 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
1984 		 * an intervening stop signal is required to cause two
1985 		 * continued events regardless of ptrace.
1986 		 */
1987 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
1988 			sig->group_exit_code = signr;
1989 		else
1990 			WARN_ON_ONCE(!current->ptrace);
1991 
1992 		sig->group_stop_count = 0;
1993 
1994 		if (task_set_jobctl_pending(current, signr | gstop))
1995 			sig->group_stop_count++;
1996 
1997 		for (t = next_thread(current); t != current;
1998 		     t = next_thread(t)) {
1999 			/*
2000 			 * Setting state to TASK_STOPPED for a group
2001 			 * stop is always done with the siglock held,
2002 			 * so this check has no races.
2003 			 */
2004 			if (!task_is_stopped(t) &&
2005 			    task_set_jobctl_pending(t, signr | gstop)) {
2006 				sig->group_stop_count++;
2007 				if (likely(!(t->ptrace & PT_SEIZED)))
2008 					signal_wake_up(t, 0);
2009 				else
2010 					ptrace_trap_notify(t);
2011 			}
2012 		}
2013 	}
2014 
2015 	if (likely(!current->ptrace)) {
2016 		int notify = 0;
2017 
2018 		/*
2019 		 * If there are no other threads in the group, or if there
2020 		 * is a group stop in progress and we are the last to stop,
2021 		 * report to the parent.
2022 		 */
2023 		if (task_participate_group_stop(current))
2024 			notify = CLD_STOPPED;
2025 
2026 		__set_current_state(TASK_STOPPED);
2027 		spin_unlock_irq(&current->sighand->siglock);
2028 
2029 		/*
2030 		 * Notify the parent of the group stop completion.  Because
2031 		 * we're not holding either the siglock or tasklist_lock
2032 		 * here, a ptracer may attach in between; however, this is for
2033 		 * group stop and should always be delivered to the real
2034 		 * parent of the group leader.  The new ptracer will get
2035 		 * its notification when this task transitions into
2036 		 * TASK_TRACED.
2037 		 */
2038 		if (notify) {
2039 			read_lock(&tasklist_lock);
2040 			do_notify_parent_cldstop(current, false, notify);
2041 			read_unlock(&tasklist_lock);
2042 		}
2043 
2044 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2045 		schedule();
2046 		return true;
2047 	} else {
2048 		/*
2049 		 * While ptraced, group stop is handled by STOP trap.
2050 		 * Schedule it and let the caller deal with it.
2051 		 */
2052 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2053 		return false;
2054 	}
2055 }
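
/*
 * Illustrative userspace sketch: the CLD_STOPPED notification emitted at
 * the end of do_signal_stop() is what wakes a parent sleeping in
 * waitpid(..., WUNTRACED).  A minimal, hypothetical parent/child pair:
 */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {
		pause();			/* child idles */
		_exit(0);
	}
	kill(child, SIGSTOP);			/* initiates a group stop */
	waitpid(child, &status, WUNTRACED);	/* WIFSTOPPED(status) is true */
	kill(child, SIGKILL);
	waitpid(child, &status, 0);
	return 0;
}
#endif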
2056 
2057 /**
2058  * do_jobctl_trap - take care of ptrace jobctl traps
2059  *
2060  * When PT_SEIZED, it's used for both group stop and explicit
2061  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2062  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2063  * the stop signal; otherwise, %SIGTRAP.
2064  *
2065  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2066  * number as exit_code and no siginfo.
2067  *
2068  * CONTEXT:
2069  * Must be called with @current->sighand->siglock held, which may be
2070  * released and re-acquired before returning with intervening sleep.
2071  */
2072 static void do_jobctl_trap(void)
2073 {
2074 	struct signal_struct *signal = current->signal;
2075 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2076 
2077 	if (current->ptrace & PT_SEIZED) {
2078 		if (!signal->group_stop_count &&
2079 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2080 			signr = SIGTRAP;
2081 		WARN_ON_ONCE(!signr);
2082 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2083 				 CLD_STOPPED);
2084 	} else {
2085 		WARN_ON_ONCE(!signr);
2086 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2087 		current->exit_code = 0;
2088 	}
2089 }
2090 
2091 static int ptrace_signal(int signr, siginfo_t *info,
2092 			 struct pt_regs *regs, void *cookie)
2093 {
2094 	ptrace_signal_deliver(regs, cookie);
2095 	/*
2096 	 * We do not check sig_kernel_stop(signr) but set this marker
2097 	 * unconditionally because we do not know whether debugger will
2098 	 * unconditionally because we do not know whether the debugger will
2099 	 * to stop after return from ptrace_stop(). In this case it will
2100 	 * be checked in do_signal_stop(); we should only stop if it was
2101 	 * not cleared by SIGCONT while we were sleeping. See also the
2102 	 * comment in dequeue_signal().
2103 	 */
2104 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2105 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2106 
2107 	/* We're back.  Did the debugger cancel the sig?  */
2108 	signr = current->exit_code;
2109 	if (signr == 0)
2110 		return signr;
2111 
2112 	current->exit_code = 0;
2113 
2114 	/*
2115 	 * Update the siginfo structure if the signal has
2116 	 * changed.  If the debugger wanted something
2117 	 * specific in the siginfo structure then it should
2118 	 * have updated *info via PTRACE_SETSIGINFO.
2119 	 */
2120 	if (signr != info->si_signo) {
2121 		info->si_signo = signr;
2122 		info->si_errno = 0;
2123 		info->si_code = SI_USER;
2124 		info->si_pid = task_pid_vnr(current->parent);
2125 		info->si_uid = task_uid(current->parent);
2126 	}
2127 
2128 	/* If the (new) signal is now blocked, requeue it.  */
2129 	if (sigismember(&current->blocked, signr)) {
2130 		specific_send_sig_info(signr, info, current);
2131 		signr = 0;
2132 	}
2133 
2134 	return signr;
2135 }
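
/*
 * Illustrative tracer-side sketch: ptrace_signal() lets the debugger
 * cancel or replace the dequeued signal.  The data argument of
 * PTRACE_CONT becomes current->exit_code above: 0 cancels the signal,
 * any other value is delivered in its place.  Hypothetical pid; error
 * handling omitted.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void rewrite_signal(pid_t child)
{
	int status;

	waitpid(child, &status, 0);
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
		ptrace(PTRACE_CONT, child, 0, SIGUSR2);	/* deliver SIGUSR2 instead */
	else
		ptrace(PTRACE_CONT, child, 0, 0);	/* cancel the signal */
}
#endif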
2136 
2137 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2138 			  struct pt_regs *regs, void *cookie)
2139 {
2140 	struct sighand_struct *sighand = current->sighand;
2141 	struct signal_struct *signal = current->signal;
2142 	int signr;
2143 
2144 relock:
2145 	/*
2146 	 * We'll jump back here after waking from any stop in TASK_STOPPED.
2147 	 * While in TASK_STOPPED, we were considered "frozen enough".
2148 	 * Now that we woke up, it's crucial that, if we're supposed to
2149 	 * be frozen, we freeze now before running anything substantial.
2150 	 */
2151 	try_to_freeze();
2152 
2153 	spin_lock_irq(&sighand->siglock);
2154 	/*
2155 	 * Every stopped thread goes here after wakeup. Check to see if
2156 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2157 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2158 	 */
2159 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2160 		int why;
2161 
2162 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2163 			why = CLD_CONTINUED;
2164 		else
2165 			why = CLD_STOPPED;
2166 
2167 		signal->flags &= ~SIGNAL_CLD_MASK;
2168 
2169 		spin_unlock_irq(&sighand->siglock);
2170 
2171 		/*
2172 		 * Notify the parent that we're continuing.  This event is
2173 		 * always per-process and doesn't make a whole lot of sense
2174 		 * for ptracers, who shouldn't consume the state via
2175 		 * wait(2) either, but, for backward compatibility, notify
2176 		 * the ptracer of the group leader too unless it's gonna be
2177 		 * a duplicate.
2178 		 */
2179 		read_lock(&tasklist_lock);
2180 		do_notify_parent_cldstop(current, false, why);
2181 
2182 		if (ptrace_reparented(current->group_leader))
2183 			do_notify_parent_cldstop(current->group_leader,
2184 						true, why);
2185 		read_unlock(&tasklist_lock);
2186 
2187 		goto relock;
2188 	}
2189 
2190 	for (;;) {
2191 		struct k_sigaction *ka;
2192 
2193 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2194 		    do_signal_stop(0))
2195 			goto relock;
2196 
2197 		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2198 			do_jobctl_trap();
2199 			spin_unlock_irq(&sighand->siglock);
2200 			goto relock;
2201 		}
2202 
2203 		signr = dequeue_signal(current, &current->blocked, info);
2204 
2205 		if (!signr)
2206 			break; /* will return 0 */
2207 
2208 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2209 			signr = ptrace_signal(signr, info,
2210 					      regs, cookie);
2211 			if (!signr)
2212 				continue;
2213 		}
2214 
2215 		ka = &sighand->action[signr-1];
2216 
2217 		/* Trace actually delivered signals. */
2218 		trace_signal_deliver(signr, info, ka);
2219 
2220 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2221 			continue;
2222 		if (ka->sa.sa_handler != SIG_DFL) {
2223 			/* Run the handler.  */
2224 			*return_ka = *ka;
2225 
2226 			if (ka->sa.sa_flags & SA_ONESHOT)
2227 				ka->sa.sa_handler = SIG_DFL;
2228 
2229 			break; /* will return non-zero "signr" value */
2230 		}
2231 
2232 		/*
2233 		 * Now we are doing the default action for this signal.
2234 		 */
2235 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2236 			continue;
2237 
2238 		/*
2239 		 * Global init gets no signals it doesn't want.
2240 		 * Container-init gets no signals it doesn't want from same
2241 		 * container.
2242 		 *
2243 		 * Note that if global/container-init sees a sig_kernel_only()
2244 		 * signal here, the signal must have been generated internally
2245 		 * or must have come from an ancestor namespace. In either
2246 		 * case, the signal cannot be dropped.
2247 		 */
2248 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2249 				!sig_kernel_only(signr))
2250 			continue;
2251 
2252 		if (sig_kernel_stop(signr)) {
2253 			/*
2254 			 * The default action is to stop all threads in
2255 			 * the thread group.  The job control signals
2256 			 * do nothing in an orphaned pgrp, but SIGSTOP
2257 			 * always works.  Note that siglock needs to be
2258 			 * dropped during the call to is_orphaned_pgrp()
2259 			 * because of lock ordering with tasklist_lock.
2260 			 * This allows an intervening SIGCONT to be posted.
2261 			 * We need to check for that and bail out if necessary.
2262 			 */
2263 			if (signr != SIGSTOP) {
2264 				spin_unlock_irq(&sighand->siglock);
2265 
2266 				/* signals can be posted during this window */
2267 
2268 				if (is_current_pgrp_orphaned())
2269 					goto relock;
2270 
2271 				spin_lock_irq(&sighand->siglock);
2272 			}
2273 
2274 			if (likely(do_signal_stop(info->si_signo))) {
2275 				/* It released the siglock.  */
2276 				goto relock;
2277 			}
2278 
2279 			/*
2280 			 * We didn't actually stop, due to a race
2281 			 * with SIGCONT or something like that.
2282 			 */
2283 			continue;
2284 		}
2285 
2286 		spin_unlock_irq(&sighand->siglock);
2287 
2288 		/*
2289 		 * Anything else is fatal, maybe with a core dump.
2290 		 */
2291 		current->flags |= PF_SIGNALED;
2292 
2293 		if (sig_kernel_coredump(signr)) {
2294 			if (print_fatal_signals)
2295 				print_fatal_signal(regs, info->si_signo);
2296 			/*
2297 			 * If it was able to dump core, this kills all
2298 			 * other threads in the group and synchronizes with
2299 			 * their demise.  If we lost the race with another
2300 			 * thread getting here, it set group_exit_code
2301 			 * first and our do_group_exit call below will use
2302 			 * that value and ignore the one we pass it.
2303 			 */
2304 			do_coredump(info->si_signo, info->si_signo, regs);
2305 		}
2306 
2307 		/*
2308 		 * Death signals, no core dump.
2309 		 */
2310 		do_group_exit(info->si_signo);
2311 		/* NOTREACHED */
2312 	}
2313 	spin_unlock_irq(&sighand->siglock);
2314 	return signr;
2315 }
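
/*
 * Illustrative sketch (loosely modeled on arch do_signal() loops of this
 * era, not any particular architecture): how get_signal_to_deliver() is
 * typically consumed.  handle_signal() is a placeholder for the arch
 * helper that builds the userspace signal frame.
 */
#if 0
static void example_do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* a handler was selected; set up the signal frame */
		handle_signal(signr, &info, &ka, regs);
		return;
	}
	/* no signal: restart interrupted syscall, restore saved mask */
}
#endif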
2316 
2317 /*
2318  * It could be that complete_signal() picked us to notify about the
2319  * group-wide signal. Other threads should be notified now to take
2320  * the shared signals in @which since we will not.
2321  */
2322 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2323 {
2324 	sigset_t retarget;
2325 	struct task_struct *t;
2326 
2327 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2328 	if (sigisemptyset(&retarget))
2329 		return;
2330 
2331 	t = tsk;
2332 	while_each_thread(tsk, t) {
2333 		if (t->flags & PF_EXITING)
2334 			continue;
2335 
2336 		if (!has_pending_signals(&retarget, &t->blocked))
2337 			continue;
2338 		/* Remove the signals this thread can handle. */
2339 		sigandsets(&retarget, &retarget, &t->blocked);
2340 
2341 		if (!signal_pending(t))
2342 			signal_wake_up(t, 0);
2343 
2344 		if (sigisemptyset(&retarget))
2345 			break;
2346 	}
2347 }
2348 
2349 void exit_signals(struct task_struct *tsk)
2350 {
2351 	int group_stop = 0;
2352 	sigset_t unblocked;
2353 
2354 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2355 		tsk->flags |= PF_EXITING;
2356 		return;
2357 	}
2358 
2359 	spin_lock_irq(&tsk->sighand->siglock);
2360 	/*
2361 	 * From now this task is not visible for group-wide signals,
2362 	 * see wants_signal(), do_signal_stop().
2363 	 */
2364 	tsk->flags |= PF_EXITING;
2365 	if (!signal_pending(tsk))
2366 		goto out;
2367 
2368 	unblocked = tsk->blocked;
2369 	signotset(&unblocked);
2370 	retarget_shared_pending(tsk, &unblocked);
2371 
2372 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2373 	    task_participate_group_stop(tsk))
2374 		group_stop = CLD_STOPPED;
2375 out:
2376 	spin_unlock_irq(&tsk->sighand->siglock);
2377 
2378 	/*
2379 	 * If group stop has completed, deliver the notification.  This
2380 	 * should always go to the real parent of the group leader.
2381 	 */
2382 	if (unlikely(group_stop)) {
2383 		read_lock(&tasklist_lock);
2384 		do_notify_parent_cldstop(tsk, false, group_stop);
2385 		read_unlock(&tasklist_lock);
2386 	}
2387 }
2388 
2389 EXPORT_SYMBOL(recalc_sigpending);
2390 EXPORT_SYMBOL_GPL(dequeue_signal);
2391 EXPORT_SYMBOL(flush_signals);
2392 EXPORT_SYMBOL(force_sig);
2393 EXPORT_SYMBOL(send_sig);
2394 EXPORT_SYMBOL(send_sig_info);
2395 EXPORT_SYMBOL(sigprocmask);
2396 EXPORT_SYMBOL(block_all_signals);
2397 EXPORT_SYMBOL(unblock_all_signals);
2398 
2399 
2400 /*
2401  * System call entry points.
2402  */
2403 
2404 /**
2405  *  sys_restart_syscall - restart a system call
2406  */
2407 SYSCALL_DEFINE0(restart_syscall)
2408 {
2409 	struct restart_block *restart = &current_thread_info()->restart_block;
2410 	return restart->fn(restart);
2411 }
2412 
2413 long do_no_restart_syscall(struct restart_block *param)
2414 {
2415 	return -EINTR;
2416 }
2417 
2418 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2419 {
2420 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2421 		sigset_t newblocked;
2422 		/* A set of now blocked but previously unblocked signals. */
2423 		sigandnsets(&newblocked, newset, &current->blocked);
2424 		retarget_shared_pending(tsk, &newblocked);
2425 	}
2426 	tsk->blocked = *newset;
2427 	recalc_sigpending();
2428 }
2429 
2430 /**
2431  * set_current_blocked - change current->blocked mask
2432  * @newset: new mask
2433  *
2434  * It is wrong to change ->blocked directly, this helper should be used
2435  * to ensure the process can't miss a shared signal we are going to block.
2436  */
2437 void set_current_blocked(const sigset_t *newset)
2438 {
2439 	struct task_struct *tsk = current;
2440 
2441 	spin_lock_irq(&tsk->sighand->siglock);
2442 	__set_task_blocked(tsk, newset);
2443 	spin_unlock_irq(&tsk->sighand->siglock);
2444 }
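
/*
 * Illustrative sketch: a typical caller is an arch rt_sigreturn path
 * restoring the mask saved in the user signal frame.  struct rt_sigframe
 * is an arch-specific placeholder here.
 */
#if 0
static int example_restore_sigmask(struct rt_sigframe __user *frame)
{
	sigset_t set;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		return -EFAULT;
	sigdelsetmask(&set, sigmask(SIGKILL) | sigmask(SIGSTOP));
	set_current_blocked(&set);
	return 0;
}
#endif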
2445 
2446 /*
2447  * This is also useful for kernel threads that want to temporarily
2448  * (or permanently) block certain signals.
2449  *
2450  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2451  * interface happily blocks "unblockable" signals like SIGKILL
2452  * and friends.
2453  */
2454 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2455 {
2456 	struct task_struct *tsk = current;
2457 	sigset_t newset;
2458 
2459 	/* Lockless, only current can change ->blocked, never from irq */
2460 	if (oldset)
2461 		*oldset = tsk->blocked;
2462 
2463 	switch (how) {
2464 	case SIG_BLOCK:
2465 		sigorsets(&newset, &tsk->blocked, set);
2466 		break;
2467 	case SIG_UNBLOCK:
2468 		sigandnsets(&newset, &tsk->blocked, set);
2469 		break;
2470 	case SIG_SETMASK:
2471 		newset = *set;
2472 		break;
2473 	default:
2474 		return -EINVAL;
2475 	}
2476 
2477 	set_current_blocked(&newset);
2478 	return 0;
2479 }
2480 
2481 /**
2482  *  sys_rt_sigprocmask - change the list of currently blocked signals
2483  *  @how: whether to add, remove, or set signals
2484  *  @nset: the new set of blocked signals, if non-null
2485  *  @oset: previous value of signal mask if non-null
2486  *  @sigsetsize: size of sigset_t type
2487  */
2488 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2489 		sigset_t __user *, oset, size_t, sigsetsize)
2490 {
2491 	sigset_t old_set, new_set;
2492 	int error;
2493 
2494 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2495 	if (sigsetsize != sizeof(sigset_t))
2496 		return -EINVAL;
2497 
2498 	old_set = current->blocked;
2499 
2500 	if (nset) {
2501 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2502 			return -EFAULT;
2503 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2504 
2505 		error = sigprocmask(how, &new_set, NULL);
2506 		if (error)
2507 			return error;
2508 	}
2509 
2510 	if (oset) {
2511 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2512 			return -EFAULT;
2513 	}
2514 
2515 	return 0;
2516 }
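
/*
 * Illustrative userspace sketch: the libc sigprocmask(2) wrapper lands in
 * sys_rt_sigprocmask() above.  Blocking SIGINT around a critical section;
 * note that SIGKILL and SIGSTOP are silently dropped from @nset by the
 * sigdelsetmask() above.
 */
#if 0
#include <signal.h>

static void critical_section(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);
	/* ... work that must not be interrupted by SIGINT ... */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore the old mask */
}
#endif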
2517 
2518 long do_sigpending(void __user *set, unsigned long sigsetsize)
2519 {
2520 	long error = -EINVAL;
2521 	sigset_t pending;
2522 
2523 	if (sigsetsize > sizeof(sigset_t))
2524 		goto out;
2525 
2526 	spin_lock_irq(&current->sighand->siglock);
2527 	sigorsets(&pending, &current->pending.signal,
2528 		  &current->signal->shared_pending.signal);
2529 	spin_unlock_irq(&current->sighand->siglock);
2530 
2531 	/* Outside the lock because only this thread touches it.  */
2532 	sigandsets(&pending, &current->blocked, &pending);
2533 
2534 	error = -EFAULT;
2535 	if (!copy_to_user(set, &pending, sigsetsize))
2536 		error = 0;
2537 
2538 out:
2539 	return error;
2540 }
2541 
2542 /**
2543  *  sys_rt_sigpending - examine pending signals that have been raised
2544  *			while blocked
2545  *  @set: stores pending signals
2546  *  @sigsetsize: size of sigset_t type or smaller
2547  */
2548 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2549 {
2550 	return do_sigpending(set, sigsetsize);
2551 }
2552 
2553 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2554 
2555 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2556 {
2557 	int err;
2558 
2559 	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2560 		return -EFAULT;
2561 	if (from->si_code < 0)
2562 		return __copy_to_user(to, from, sizeof(siginfo_t))
2563 			? -EFAULT : 0;
2564 	/*
2565 	 * If you change siginfo_t structure, please be sure
2566 	 * this code is fixed accordingly.
2567 	 * Please remember to update the signalfd_copyinfo() function
2568 	 * inside fs/signalfd.c too, in case siginfo_t changes.
2569 	 * It should never copy any pad contained in the structure
2570 	 * to avoid security leaks, but must copy the generic
2571 	 * 3 ints plus the relevant union member.
2572 	 */
2573 	err = __put_user(from->si_signo, &to->si_signo);
2574 	err |= __put_user(from->si_errno, &to->si_errno);
2575 	err |= __put_user((short)from->si_code, &to->si_code);
2576 	switch (from->si_code & __SI_MASK) {
2577 	case __SI_KILL:
2578 		err |= __put_user(from->si_pid, &to->si_pid);
2579 		err |= __put_user(from->si_uid, &to->si_uid);
2580 		break;
2581 	case __SI_TIMER:
2582 		err |= __put_user(from->si_tid, &to->si_tid);
2583 		err |= __put_user(from->si_overrun, &to->si_overrun);
2584 		err |= __put_user(from->si_ptr, &to->si_ptr);
2585 		break;
2586 	case __SI_POLL:
2587 		err |= __put_user(from->si_band, &to->si_band);
2588 		err |= __put_user(from->si_fd, &to->si_fd);
2589 		break;
2590 	case __SI_FAULT:
2591 		err |= __put_user(from->si_addr, &to->si_addr);
2592 #ifdef __ARCH_SI_TRAPNO
2593 		err |= __put_user(from->si_trapno, &to->si_trapno);
2594 #endif
2595 #ifdef BUS_MCEERR_AO
2596 		/*
2597 		 * Other callers might not initialize the si_lsb field,
2598 		 * so check explicitly for the right codes here.
2599 		 */
2600 		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2601 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2602 #endif
2603 		break;
2604 	case __SI_CHLD:
2605 		err |= __put_user(from->si_pid, &to->si_pid);
2606 		err |= __put_user(from->si_uid, &to->si_uid);
2607 		err |= __put_user(from->si_status, &to->si_status);
2608 		err |= __put_user(from->si_utime, &to->si_utime);
2609 		err |= __put_user(from->si_stime, &to->si_stime);
2610 		break;
2611 	case __SI_RT: /* This is not generated by the kernel as of now. */
2612 	case __SI_MESGQ: /* But this is */
2613 		err |= __put_user(from->si_pid, &to->si_pid);
2614 		err |= __put_user(from->si_uid, &to->si_uid);
2615 		err |= __put_user(from->si_ptr, &to->si_ptr);
2616 		break;
2617 	default: /* this is just in case for now ... */
2618 		err |= __put_user(from->si_pid, &to->si_pid);
2619 		err |= __put_user(from->si_uid, &to->si_uid);
2620 		break;
2621 	}
2622 	return err;
2623 }
2624 
2625 #endif
2626 
2627 /**
2628  *  do_sigtimedwait - wait for queued signals specified in @which
2629  *  @which: queued signals to wait for
2630  *  @info: if non-null, the signal's siginfo is returned here
2631  *  @ts: upper bound on process time suspension
2632  */
2633 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2634 			const struct timespec *ts)
2635 {
2636 	struct task_struct *tsk = current;
2637 	long timeout = MAX_SCHEDULE_TIMEOUT;
2638 	sigset_t mask = *which;
2639 	int sig;
2640 
2641 	if (ts) {
2642 		if (!timespec_valid(ts))
2643 			return -EINVAL;
2644 		timeout = timespec_to_jiffies(ts);
2645 		/*
2646 		 * We can be close to the next tick, add another one
2647 		 * to ensure we will wait at least the time asked for.
2648 		 */
2649 		if (ts->tv_sec || ts->tv_nsec)
2650 			timeout++;
2651 	}
2652 
2653 	/*
2654 	 * Invert the set of allowed signals to get those we want to block.
2655 	 */
2656 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2657 	signotset(&mask);
2658 
2659 	spin_lock_irq(&tsk->sighand->siglock);
2660 	sig = dequeue_signal(tsk, &mask, info);
2661 	if (!sig && timeout) {
2662 		/*
2663 		 * None ready, temporarily unblock those we're interested
2664 		 * in while we are sleeping, so that we'll be awakened when
2665 		 * they arrive.  Unblocking is always fine; we can avoid
2666 		 * set_current_blocked().
2667 		 */
2668 		tsk->real_blocked = tsk->blocked;
2669 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2670 		recalc_sigpending();
2671 		spin_unlock_irq(&tsk->sighand->siglock);
2672 
2673 		timeout = schedule_timeout_interruptible(timeout);
2674 
2675 		spin_lock_irq(&tsk->sighand->siglock);
2676 		__set_task_blocked(tsk, &tsk->real_blocked);
2677 		siginitset(&tsk->real_blocked, 0);
2678 		sig = dequeue_signal(tsk, &mask, info);
2679 	}
2680 	spin_unlock_irq(&tsk->sighand->siglock);
2681 
2682 	if (sig)
2683 		return sig;
2684 	return timeout ? -EINTR : -EAGAIN;
2685 }
2686 
2687 /**
2688  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
2689  *			in @uthese
2690  *  @uthese: queued signals to wait for
2691  *  @uinfo: if non-null, the signal's siginfo is returned here
2692  *  @uts: upper bound on process time suspension
2693  *  @sigsetsize: size of sigset_t type
2694  */
2695 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2696 		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2697 		size_t, sigsetsize)
2698 {
2699 	sigset_t these;
2700 	struct timespec ts;
2701 	siginfo_t info;
2702 	int ret;
2703 
2704 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2705 	if (sigsetsize != sizeof(sigset_t))
2706 		return -EINVAL;
2707 
2708 	if (copy_from_user(&these, uthese, sizeof(these)))
2709 		return -EFAULT;
2710 
2711 	if (uts) {
2712 		if (copy_from_user(&ts, uts, sizeof(ts)))
2713 			return -EFAULT;
2714 	}
2715 
2716 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2717 
2718 	if (ret > 0 && uinfo) {
2719 		if (copy_siginfo_to_user(uinfo, &info))
2720 			ret = -EFAULT;
2721 	}
2722 
2723 	return ret;
2724 }
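
/*
 * Illustrative userspace sketch: synchronous signal handling through
 * sigtimedwait(2), served by do_sigtimedwait() above.  The signal must
 * be blocked first so it stays queued instead of being delivered to a
 * handler.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void wait_for_usr1(void)
{
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	siginfo_t si;
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
		printf("SIGUSR1 from pid %d\n", si.si_pid);
	else
		perror("sigtimedwait");		/* EAGAIN on timeout, as above */
}
#endif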
2725 
2726 /**
2727  *  sys_kill - send a signal to a process
2728  *  @pid: the PID of the process
2729  *  @sig: signal to be sent
2730  */
2731 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2732 {
2733 	struct siginfo info;
2734 
2735 	info.si_signo = sig;
2736 	info.si_errno = 0;
2737 	info.si_code = SI_USER;
2738 	info.si_pid = task_tgid_vnr(current);
2739 	info.si_uid = current_uid();
2740 
2741 	return kill_something_info(sig, &info, pid);
2742 }
2743 
2744 static int
2745 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2746 {
2747 	struct task_struct *p;
2748 	int error = -ESRCH;
2749 
2750 	rcu_read_lock();
2751 	p = find_task_by_vpid(pid);
2752 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2753 		error = check_kill_permission(sig, info, p);
2754 		/*
2755 		 * The null signal is a permissions and process existence
2756 		 * probe.  No signal is actually delivered.
2757 		 */
2758 		if (!error && sig) {
2759 			error = do_send_sig_info(sig, info, p, false);
2760 			/*
2761 			 * If lock_task_sighand() failed we pretend the task
2762 			 * dies after receiving the signal. The window is tiny,
2763 			 * and the signal is private anyway.
2764 			 */
2765 			if (unlikely(error == -ESRCH))
2766 				error = 0;
2767 		}
2768 	}
2769 	rcu_read_unlock();
2770 
2771 	return error;
2772 }
2773 
2774 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2775 {
2776 	struct siginfo info;
2777 
2778 	info.si_signo = sig;
2779 	info.si_errno = 0;
2780 	info.si_code = SI_TKILL;
2781 	info.si_pid = task_tgid_vnr(current);
2782 	info.si_uid = current_uid();
2783 
2784 	return do_send_specific(tgid, pid, sig, &info);
2785 }
2786 
2787 /**
2788  *  sys_tgkill - send signal to one specific thread
2789  *  @tgid: the thread group ID of the thread
2790  *  @pid: the PID of the thread
2791  *  @sig: signal to be sent
2792  *
2793  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2794  *  exists but no longer belongs to the target process. This
2795  *  method solves the problem of threads exiting and PIDs getting reused.
2796  */
2797 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2798 {
2799 	/* This is only valid for single tasks */
2800 	if (pid <= 0 || tgid <= 0)
2801 		return -EINVAL;
2802 
2803 	return do_tkill(tgid, pid, sig);
2804 }
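
/*
 * Illustrative userspace sketch: tgkill is usually invoked raw via
 * syscall(2).  The @tgid check above is what makes this robust against
 * thread-ID reuse after the target thread exits.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

static long signal_thread(pid_t tgid, pid_t tid)
{
	return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
}
#endif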
2805 
2806 /**
2807  *  sys_tkill - send signal to one specific task
2808  *  @pid: the PID of the task
2809  *  @sig: signal to be sent
2810  *
2811  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2812  */
2813 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2814 {
2815 	/* This is only valid for single tasks */
2816 	if (pid <= 0)
2817 		return -EINVAL;
2818 
2819 	return do_tkill(0, pid, sig);
2820 }
2821 
2822 /**
2823  *  sys_rt_sigqueueinfo - send signal information to a process
2824  *  @pid: the PID of the process
2825  *  @sig: signal to be sent
2826  *  @uinfo: signal info to be sent
2827  */
2828 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2829 		siginfo_t __user *, uinfo)
2830 {
2831 	siginfo_t info;
2832 
2833 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2834 		return -EFAULT;
2835 
2836 	/* Not even root can pretend to send signals from the kernel.
2837 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2838 	 */
2839 	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2840 		/* We used to allow any < 0 si_code */
2841 		WARN_ON_ONCE(info.si_code < 0);
2842 		return -EPERM;
2843 	}
2844 	info.si_signo = sig;
2845 
2846 	/* POSIX.1b doesn't mention process groups.  */
2847 	return kill_proc_info(sig, &info, pid);
2848 }
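
/*
 * Illustrative userspace sketch: sigqueue(3) fills a siginfo with
 * si_code = SI_QUEUE (negative, so it passes the check above) and ends
 * up in sys_rt_sigqueueinfo(), attaching a value to the signal.
 */
#if 0
#include <signal.h>

static int notify_with_value(pid_t pid, int value)
{
	union sigval sv = { .sival_int = value };

	return sigqueue(pid, SIGRTMIN, sv);
}
#endif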
2849 
2850 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2851 {
2852 	/* This is only valid for single tasks */
2853 	if (pid <= 0 || tgid <= 0)
2854 		return -EINVAL;
2855 
2856 	/* Not even root can pretend to send signals from the kernel.
2857 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2858 	 */
2859 	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2860 		/* We used to allow any < 0 si_code */
2861 		WARN_ON_ONCE(info->si_code < 0);
2862 		return -EPERM;
2863 	}
2864 	info->si_signo = sig;
2865 
2866 	return do_send_specific(tgid, pid, sig, info);
2867 }
2868 
2869 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2870 		siginfo_t __user *, uinfo)
2871 {
2872 	siginfo_t info;
2873 
2874 	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2875 		return -EFAULT;
2876 
2877 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2878 }
2879 
2880 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2881 {
2882 	struct task_struct *t = current;
2883 	struct k_sigaction *k;
2884 	sigset_t mask;
2885 
2886 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2887 		return -EINVAL;
2888 
2889 	k = &t->sighand->action[sig-1];
2890 
2891 	spin_lock_irq(&current->sighand->siglock);
2892 	if (oact)
2893 		*oact = *k;
2894 
2895 	if (act) {
2896 		sigdelsetmask(&act->sa.sa_mask,
2897 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
2898 		*k = *act;
2899 		/*
2900 		 * POSIX 3.3.1.3:
2901 		 *  "Setting a signal action to SIG_IGN for a signal that is
2902 		 *   pending shall cause the pending signal to be discarded,
2903 		 *   whether or not it is blocked."
2904 		 *
2905 		 *  "Setting a signal action to SIG_DFL for a signal that is
2906 		 *   pending and whose default action is to ignore the signal
2907 		 *   (for example, SIGCHLD), shall cause the pending signal to
2908 		 *   be discarded, whether or not it is blocked"
2909 		 */
2910 		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2911 			sigemptyset(&mask);
2912 			sigaddset(&mask, sig);
2913 			rm_from_queue_full(&mask, &t->signal->shared_pending);
2914 			do {
2915 				rm_from_queue_full(&mask, &t->pending);
2916 				t = next_thread(t);
2917 			} while (t != current);
2918 		}
2919 	}
2920 
2921 	spin_unlock_irq(&current->sighand->siglock);
2922 	return 0;
2923 }
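
/*
 * Illustrative userspace sketch: installing a handler with sigaction(2),
 * which reaches do_sigaction() above.  SIGKILL and SIGSTOP are rejected
 * by the sig_kernel_only() check.
 */
#if 0
#include <signal.h>

static void on_usr1(int sig, siginfo_t *si, void *uctx)
{
	/* async-signal-safe work only */
}

static int install_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_usr1;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGUSR1, &sa, NULL);
}
#endif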
2924 
2925 int
2926 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2927 {
2928 	stack_t oss;
2929 	int error;
2930 
2931 	oss.ss_sp = (void __user *) current->sas_ss_sp;
2932 	oss.ss_size = current->sas_ss_size;
2933 	oss.ss_flags = sas_ss_flags(sp);
2934 
2935 	if (uss) {
2936 		void __user *ss_sp;
2937 		size_t ss_size;
2938 		int ss_flags;
2939 
2940 		error = -EFAULT;
2941 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2942 			goto out;
2943 		error = __get_user(ss_sp, &uss->ss_sp) |
2944 			__get_user(ss_flags, &uss->ss_flags) |
2945 			__get_user(ss_size, &uss->ss_size);
2946 		if (error)
2947 			goto out;
2948 
2949 		error = -EPERM;
2950 		if (on_sig_stack(sp))
2951 			goto out;
2952 
2953 		error = -EINVAL;
2954 		/*
2955 		 * Note - this code used to test ss_flags incorrectly:
2956 		 *  	  old code may have been written using ss_flags==0
2957 		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
2958 		 *	  way that worked) - this fix preserves that older
2959 		 *	  mechanism.
2960 		 */
2961 		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2962 			goto out;
2963 
2964 		if (ss_flags == SS_DISABLE) {
2965 			ss_size = 0;
2966 			ss_sp = NULL;
2967 		} else {
2968 			error = -ENOMEM;
2969 			if (ss_size < MINSIGSTKSZ)
2970 				goto out;
2971 		}
2972 
2973 		current->sas_ss_sp = (unsigned long) ss_sp;
2974 		current->sas_ss_size = ss_size;
2975 	}
2976 
2977 	error = 0;
2978 	if (uoss) {
2979 		error = -EFAULT;
2980 		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2981 			goto out;
2982 		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2983 			__put_user(oss.ss_size, &uoss->ss_size) |
2984 			__put_user(oss.ss_flags, &uoss->ss_flags);
2985 	}
2986 
2987 out:
2988 	return error;
2989 }
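
/*
 * Illustrative userspace sketch: sigaltstack(2), served by
 * do_sigaltstack() above, lets a SIGSEGV handler run even after the
 * normal stack has overflowed.  Static storage keeps the sketch simple.
 */
#if 0
#include <signal.h>

static char altstack[SIGSTKSZ];

static int use_alt_stack(void (*handler)(int))
{
	stack_t ss = {
		.ss_sp		= altstack,
		.ss_size	= sizeof(altstack),
		.ss_flags	= 0,
	};
	struct sigaction sa = { 0 };

	if (sigaltstack(&ss, NULL))
		return -1;
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;	/* run the handler on the alt stack */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif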
2990 
2991 #ifdef __ARCH_WANT_SYS_SIGPENDING
2992 
2993 /**
2994  *  sys_sigpending - examine pending signals
2995  *  @set: where the mask of pending signals is returned
2996  */
2997 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2998 {
2999 	return do_sigpending(set, sizeof(*set));
3000 }
3001 
3002 #endif
3003 
3004 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3005 /**
3006  *  sys_sigprocmask - examine and change blocked signals
3007  *  @how: whether to add, remove, or set signals
3008  *  @nset: signals to add or remove (if non-null)
3009  *  @oset: previous value of signal mask if non-null
3010  *
3011  * Some platforms have their own version with special arguments;
3012  * others support only sys_rt_sigprocmask.
3013  */
3014 
3015 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3016 		old_sigset_t __user *, oset)
3017 {
3018 	old_sigset_t old_set, new_set;
3019 	sigset_t new_blocked;
3020 
3021 	old_set = current->blocked.sig[0];
3022 
3023 	if (nset) {
3024 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
3025 			return -EFAULT;
3026 		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
3027 
3028 		new_blocked = current->blocked;
3029 
3030 		switch (how) {
3031 		case SIG_BLOCK:
3032 			sigaddsetmask(&new_blocked, new_set);
3033 			break;
3034 		case SIG_UNBLOCK:
3035 			sigdelsetmask(&new_blocked, new_set);
3036 			break;
3037 		case SIG_SETMASK:
3038 			new_blocked.sig[0] = new_set;
3039 			break;
3040 		default:
3041 			return -EINVAL;
3042 		}
3043 
3044 		set_current_blocked(&new_blocked);
3045 	}
3046 
3047 	if (oset) {
3048 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
3049 			return -EFAULT;
3050 	}
3051 
3052 	return 0;
3053 }
3054 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3055 
3056 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
3057 /**
3058  *  sys_rt_sigaction - alter an action taken by a process
3059  *  @sig: signal to be sent
3060  *  @act: new sigaction
3061  *  @oact: used to save the previous sigaction
3062  *  @sigsetsize: size of sigset_t type
3063  */
3064 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3065 		const struct sigaction __user *, act,
3066 		struct sigaction __user *, oact,
3067 		size_t, sigsetsize)
3068 {
3069 	struct k_sigaction new_sa, old_sa;
3070 	int ret = -EINVAL;
3071 
3072 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3073 	if (sigsetsize != sizeof(sigset_t))
3074 		goto out;
3075 
3076 	if (act) {
3077 		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3078 			return -EFAULT;
3079 	}
3080 
3081 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3082 
3083 	if (!ret && oact) {
3084 		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3085 			return -EFAULT;
3086 	}
3087 out:
3088 	return ret;
3089 }
3090 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
3091 
3092 #ifdef __ARCH_WANT_SYS_SGETMASK
3093 
3094 /*
3095  * For backwards compatibility.  Functionality superseded by sigprocmask.
3096  */
3097 SYSCALL_DEFINE0(sgetmask)
3098 {
3099 	/* SMP safe */
3100 	return current->blocked.sig[0];
3101 }
3102 
3103 SYSCALL_DEFINE1(ssetmask, int, newmask)
3104 {
3105 	int old;
3106 
3107 	spin_lock_irq(&current->sighand->siglock);
3108 	old = current->blocked.sig[0];
3109 
3110 	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
3111 						  sigmask(SIGSTOP)));
3112 	recalc_sigpending();
3113 	spin_unlock_irq(&current->sighand->siglock);
3114 
3115 	return old;
3116 }
3117 #endif /* __ARCH_WANT_SYS_SGETMASK */
3118 
3119 #ifdef __ARCH_WANT_SYS_SIGNAL
3120 /*
3121  * For backwards compatibility.  Functionality superseded by sigaction.
3122  */
3123 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3124 {
3125 	struct k_sigaction new_sa, old_sa;
3126 	int ret;
3127 
3128 	new_sa.sa.sa_handler = handler;
3129 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3130 	sigemptyset(&new_sa.sa.sa_mask);
3131 
3132 	ret = do_sigaction(sig, &new_sa, &old_sa);
3133 
3134 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3135 }
3136 #endif /* __ARCH_WANT_SYS_SIGNAL */
3137 
3138 #ifdef __ARCH_WANT_SYS_PAUSE
3139 
3140 SYSCALL_DEFINE0(pause)
3141 {
3142 	while (!signal_pending(current)) {
3143 		current->state = TASK_INTERRUPTIBLE;
3144 		schedule();
3145 	}
3146 	return -ERESTARTNOHAND;
3147 }
3148 
3149 #endif
3150 
3151 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3152 /**
3153  *  sys_rt_sigsuspend - replace the signal mask with the
3154  *	@unewset value until a signal is received
3155  *  @unewset: new signal mask value
3156  *  @sigsetsize: size of sigset_t type
3157  */
3158 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3159 {
3160 	sigset_t newset;
3161 
3162 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3163 	if (sigsetsize != sizeof(sigset_t))
3164 		return -EINVAL;
3165 
3166 	if (copy_from_user(&newset, unewset, sizeof(newset)))
3167 		return -EFAULT;
3168 	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3169 
3170 	spin_lock_irq(&current->sighand->siglock);
3171 	current->saved_sigmask = current->blocked;
3172 	current->blocked = newset;
3173 	recalc_sigpending();
3174 	spin_unlock_irq(&current->sighand->siglock);
3175 
3176 	current->state = TASK_INTERRUPTIBLE;
3177 	schedule();
3178 	set_restore_sigmask();
3179 	return -ERESTARTNOHAND;
3180 }
3181 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
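
/*
 * Illustrative userspace sketch: the classic block/check/sigsuspend
 * pattern.  sys_rt_sigsuspend() installs @unewset and sleeps in a single
 * atomic step, closing the wake-up race a sigprocmask()+pause() pair
 * would leave open.  A SIGUSR1 handler that sets got_usr1 is assumed.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void wait_for_flag(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);
	while (!got_usr1)
		sigsuspend(&old);	/* atomically unblock and sleep */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif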
3182 
3183 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3184 {
3185 	return NULL;
3186 }
3187 
3188 void __init signals_init(void)
3189 {
3190 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3191 }
3192 
3193 #ifdef CONFIG_KGDB_KDB
3194 #include <linux/kdb.h>
3195 /*
3196  * kdb_send_sig_info - Allows kdb to send signals without exposing
3197  * signal internals.  This function checks if the required locks are
3198  * available before calling the main signal code, to avoid kdb
3199  * deadlocks.
3200  */
3201 void
3202 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3203 {
3204 	static struct task_struct *kdb_prev_t;
3205 	int sig, new_t;
3206 	if (!spin_trylock(&t->sighand->siglock)) {
3207 		kdb_printf("Can't do kill command now.\n"
3208 			   "The sigmask lock is held somewhere else in "
3209 			   "the kernel, try again later\n");
3210 		return;
3211 	}
3212 	spin_unlock(&t->sighand->siglock);
3213 	new_t = kdb_prev_t != t;
3214 	kdb_prev_t = t;
3215 	if (t->state != TASK_RUNNING && new_t) {
3216 		kdb_printf("Process is not RUNNING, sending a signal from "
3217 			   "kdb risks deadlock\n"
3218 			   "on the run queue locks. "
3219 			   "The signal has _not_ been sent.\n"
3220 			   "Reissue the kill command if you want to risk "
3221 			   "the deadlock.\n");
3222 		return;
3223 	}
3224 	sig = info->si_signo;
3225 	if (send_sig_info(sig, info, t))
3226 		kdb_printf("Failed to deliver signal %d to process %d.\n",
3227 			   sig, t->pid);
3228 	else
3229 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
3230 }
3231 #endif	/* CONFIG_KGDB_KDB */
3232