/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, with the
	 * exception of SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may clear it themselves.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/*
	 * Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
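
/*
 * Illustrative sketch (hypothetical caller, not code from this file):
 * with both SIGUSR1 and SIGSEGV pending and neither blocked, next_signal()
 * reports the synchronous SIGSEGV first, because SYNCHRONOUS_MASK signals
 * win within the first word:
 *
 *	sigaddset(&pending->signal, SIGUSR1);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	sig = next_signal(pending, &current->blocked);	- yields SIGSEGV
 */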

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
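
/*
 * Illustrative sketch of the required calling context (hypothetical
 * caller; cf. task_join_group_stop() below):
 *
 *	spin_lock_irq(&task->sighand->siglock);
 *	if (task_set_jobctl_pending(task, signr | JOBCTL_STOP_PENDING |
 *					  JOBCTL_STOP_CONSUME))
 *		task->signal->group_stop_count++;
 *	spin_unlock_irq(&task->sighand->siglock);
 */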

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop))
			sig->group_stop_count++;
	}
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
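
/*
 * Illustrative kthread usage sketch (hypothetical caller): a kthread that
 * accepts SIGTERM handles the request out of band and then discards the
 * pending state:
 *
 *	allow_signal(SIGTERM);
 *	...
 *	if (signal_pending(current)) {
 *		... act on the request ...
 *		flush_signals(current);
 *	}
 */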

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If ptraced, let the tracer determine. */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/*
	 * We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
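
/*
 * Illustrative sketch of the calling convention (hypothetical caller;
 * cf. sys_rt_sigtimedwait and signalfd):
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */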

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
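
/*
 * Illustrative usage sketch: booting with the command-line option below
 * (or toggling the corresponding sysctl, where available) enables the
 * diagnostics above:
 *
 *	print-fatal-signals=1
 */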

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
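
/*
 * Illustrative sketch of the usual wrapper usage (hypothetical caller;
 * cf. do_send_sig_info() above):
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand and p->signal are stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */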

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
			 const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
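
/*
 * Illustrative sketch (hypothetical driver caller): @priv selects the
 * special siginfo value, so the signal appears either user-originated
 * (SEND_SIG_NOINFO) or kernel-originated (SEND_SIG_PRIV) to the receiver:
 *
 *	send_sig(SIGHUP, task, 0);	- as if sent from user space
 *	send_sig(SIGKILL, task, 1);	- privileged, sent by the kernel
 */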

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}
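
/*
 * Illustrative arch fault-path sketch (hypothetical caller, on an
 * architecture where the ___ARCH_SI_* parameter macros expand to
 * nothing):
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_address, current);
 */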

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/*
 * For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
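
/*
 * Illustrative sketch: roughly what ^C delivery in the tty layer boils
 * down to (hypothetical caller; cf. the n_tty isig handling):
 *
 *	struct pid *tty_pgrp = tty_get_pgrp(tty);
 *
 *	if (tty_pgrp) {
 *		kill_pgrp(tty_pgrp, SIGINT, 1);
 *		put_pid(tty_pgrp);
 *	}
 */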

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
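
/*
 * Illustrative sketch of the POSIX timer pattern described above
 * (hypothetical caller; cf. timer_create and send_sigqueue/sigqueue_free
 * below):
 *
 *	struct sigqueue *q = sigqueue_alloc();
 *
 *	if (!q)
 *		return -EAGAIN;			- report failure at create time
 *	...
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	- reused on each expiry
 *	...
 *	sigqueue_free(q);			- on timer deletion
 */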

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
1804 
1805 /*
1806  * Let a parent know about the death of a child.
1807  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1808  *
1809  * Returns true if our parent ignored us and so we've switched to
1810  * self-reaping.
1811  */
1812 bool do_notify_parent(struct task_struct *tsk, int sig)
1813 {
1814 	struct kernel_siginfo info;
1815 	unsigned long flags;
1816 	struct sighand_struct *psig;
1817 	bool autoreap = false;
1818 	u64 utime, stime;
1819 
1820 	BUG_ON(sig == -1);
1821 
1822  	/* do_notify_parent_cldstop should have been called instead.  */
1823  	BUG_ON(task_is_stopped_or_traced(tsk));
1824 
1825 	BUG_ON(!tsk->ptrace &&
1826 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1827 
1828 	if (sig != SIGCHLD) {
1829 		/*
1830 		 * This is only possible if parent == real_parent.
1831 		 * Check if it has changed security domain.
1832 		 */
1833 		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1834 			sig = SIGCHLD;
1835 	}
1836 
1837 	clear_siginfo(&info);
1838 	info.si_signo = sig;
1839 	info.si_errno = 0;
1840 	/*
1841 	 * We are under tasklist_lock here so our parent is tied to
1842 	 * us and cannot change.
1843 	 *
1844 	 * task_active_pid_ns will always return the same pid namespace
1845 	 * until a task passes through release_task.
1846 	 *
1847 	 * write_lock() currently calls preempt_disable() which is the
1848 	 * same as rcu_read_lock(), but according to Oleg, this is not
1849 	 * correct to rely on this
1850 	 */
1851 	rcu_read_lock();
1852 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1853 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1854 				       task_uid(tsk));
1855 	rcu_read_unlock();
1856 
1857 	task_cputime(tsk, &utime, &stime);
1858 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1859 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1860 
1861 	info.si_status = tsk->exit_code & 0x7f;
1862 	if (tsk->exit_code & 0x80)
1863 		info.si_code = CLD_DUMPED;
1864 	else if (tsk->exit_code & 0x7f)
1865 		info.si_code = CLD_KILLED;
1866 	else {
1867 		info.si_code = CLD_EXITED;
1868 		info.si_status = tsk->exit_code >> 8;
1869 	}
1870 
1871 	psig = tsk->parent->sighand;
1872 	spin_lock_irqsave(&psig->siglock, flags);
1873 	if (!tsk->ptrace && sig == SIGCHLD &&
1874 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1875 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1876 		/*
1877 		 * We are exiting and our parent doesn't care.  POSIX.1
1878 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1879 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1880 		 * automatically and not left for our parent's wait4 call.
1881 		 * Rather than having the parent do it as a magic kind of
1882 		 * signal handler, we just set this to tell do_exit that we
1883 		 * can be cleaned up without becoming a zombie.  Note that
1884 		 * we still call __wake_up_parent in this case, because a
1885 		 * blocked sys_wait4 might now return -ECHILD.
1886 		 *
1887 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1888 		 * is implementation-defined: we do (if you don't want
1889 		 * it, just use SIG_IGN instead).
1890 		 */
1891 		autoreap = true;
1892 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1893 			sig = 0;
1894 	}
1895 	if (valid_signal(sig) && sig)
1896 		__group_send_sig_info(sig, &info, tsk->parent);
1897 	__wake_up_parent(tsk, tsk->parent);
1898 	spin_unlock_irqrestore(&psig->siglock, flags);
1899 
1900 	return autoreap;
1901 }
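
/*
 * Editorial note: the si_code/si_status encoding above mirrors the
 * userspace wait(2) status macros (a rough sketch for orientation,
 * with names from <sys/wait.h>; not part of the original source):
 *
 *	(code & 0x7f) == 0  ->  CLD_EXITED,  WIFEXITED(),   status = code >> 8
 *	(code & 0x7f) != 0  ->  CLD_KILLED,  WIFSIGNALED(), status = code & 0x7f
 *	(code & 0x80)       ->  CLD_DUMPED,  WCOREDUMP()
 */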
1902 
1903 /**
1904  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1905  * @tsk: task reporting the state change
1906  * @for_ptracer: the notification is for ptracer
1907  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1908  *
1909  * Notify @tsk's parent that the stopped/continued state has changed.  If
1910  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
1911  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1912  *
1913  * CONTEXT:
1914  * Must be called with tasklist_lock at least read locked.
1915  */
1916 static void do_notify_parent_cldstop(struct task_struct *tsk,
1917 				     bool for_ptracer, int why)
1918 {
1919 	struct kernel_siginfo info;
1920 	unsigned long flags;
1921 	struct task_struct *parent;
1922 	struct sighand_struct *sighand;
1923 	u64 utime, stime;
1924 
1925 	if (for_ptracer) {
1926 		parent = tsk->parent;
1927 	} else {
1928 		tsk = tsk->group_leader;
1929 		parent = tsk->real_parent;
1930 	}
1931 
1932 	clear_siginfo(&info);
1933 	info.si_signo = SIGCHLD;
1934 	info.si_errno = 0;
1935 	/*
1936 	 * see comment in do_notify_parent() about the following 4 lines
1937 	 */
1938 	rcu_read_lock();
1939 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1940 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1941 	rcu_read_unlock();
1942 
1943 	task_cputime(tsk, &utime, &stime);
1944 	info.si_utime = nsec_to_clock_t(utime);
1945 	info.si_stime = nsec_to_clock_t(stime);
1946 
1947 	info.si_code = why;
1948 	switch (why) {
1949 	case CLD_CONTINUED:
1950 		info.si_status = SIGCONT;
1951 		break;
1952 	case CLD_STOPPED:
1953 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1954 		break;
1955 	case CLD_TRAPPED:
1956 		info.si_status = tsk->exit_code & 0x7f;
1957 		break;
1958 	default:
1959 		BUG();
1960 	}
1961 
1962 	sighand = parent->sighand;
1963 	spin_lock_irqsave(&sighand->siglock, flags);
1964 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1965 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1966 		__group_send_sig_info(SIGCHLD, &info, parent);
1967 	/*
1968 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1969 	 */
1970 	__wake_up_parent(tsk, parent);
1971 	spin_unlock_irqrestore(&sighand->siglock, flags);
1972 }
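
/*
 * Editorial note: the SIG_IGN/SA_NOCLDSTOP check above is what a parent
 * opts into from userspace with something like (a hedged sketch, not
 * from this file):
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = SIG_DFL;
 *	sa.sa_flags = SA_NOCLDSTOP;
 *	sigaction(SIGCHLD, &sa, NULL);
 *
 * after which stop/continue events still wake a blocked wait4() via
 * __wake_up_parent() but generate no SIGCHLD.
 */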
1973 
1974 static inline bool may_ptrace_stop(void)
1975 {
1976 	if (!likely(current->ptrace))
1977 		return false;
1978 	/*
1979 	 * Are we in the middle of do_coredump?
1980 	 * If so and our tracer is also part of the coredump stopping
1981 	 * is a deadlock situation, and pointless because our tracer
1982 	 * is dead so don't allow us to stop.
1983 	 * If SIGKILL was already sent before the caller unlocked
1984 	 * ->siglock we must see ->core_state != NULL. Otherwise it
1985 	 * is safe to enter schedule().
1986 	 *
1987 	 * This is almost outdated, a task with the pending SIGKILL can't
1988 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1989 	 * after SIGKILL was already dequeued.
1990 	 */
1991 	if (unlikely(current->mm->core_state) &&
1992 	    unlikely(current->mm == current->parent->mm))
1993 		return false;
1994 
1995 	return true;
1996 }
1997 
1998 /*
1999  * Return true if there is a SIGKILL that should be waking us up.
2000  * Called with the siglock held.
2001  */
2002 static bool sigkill_pending(struct task_struct *tsk)
2003 {
2004 	return sigismember(&tsk->pending.signal, SIGKILL) ||
2005 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2006 }
2007 
2008 /*
2009  * This must be called with current->sighand->siglock held.
2010  *
2011  * This should be the path for all ptrace stops.
2012  * We always set current->last_siginfo while stopped here.
2013  * That makes it a way to test a stopped process for
2014  * being ptrace-stopped vs being job-control-stopped.
2015  *
2016  * If we actually decide not to stop at all because the tracer
2017  * is gone, we keep current->exit_code unless clear_code.
2018  */
2019 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2020 	__releases(&current->sighand->siglock)
2021 	__acquires(&current->sighand->siglock)
2022 {
2023 	bool gstop_done = false;
2024 
2025 	if (arch_ptrace_stop_needed(exit_code, info)) {
2026 		/*
2027 		 * The arch code has something special to do before a
2028 		 * ptrace stop.  This is allowed to block, e.g. for faults
2029 		 * on user stack pages.  We can't keep the siglock while
2030 		 * calling arch_ptrace_stop, so we must release it now.
2031 		 * To preserve proper semantics, we must do this before
2032 		 * any signal bookkeeping like checking group_stop_count.
2033 		 * Meanwhile, a SIGKILL could come in before we retake the
2034 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2035 		 * So after regaining the lock, we must check for SIGKILL.
2036 		 */
2037 		spin_unlock_irq(&current->sighand->siglock);
2038 		arch_ptrace_stop(exit_code, info);
2039 		spin_lock_irq(&current->sighand->siglock);
2040 		if (sigkill_pending(current))
2041 			return;
2042 	}
2043 
2044 	set_special_state(TASK_TRACED);
2045 
2046 	/*
2047 	 * We're committing to trapping.  TRACED should be visible before
2048 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2049 	 * Also, transition to TRACED and updates to ->jobctl should be
2050 	 * atomic with respect to siglock and should be done after the arch
2051 	 * hook as siglock is released and regrabbed across it.
2052 	 *
2053 	 *     TRACER				    TRACEE
2054 	 *
2055 	 *     ptrace_attach()
2056 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2057 	 *     do_wait()
2058 	 *       set_current_state()                smp_wmb();
2059 	 *       ptrace_do_wait()
2060 	 *         wait_task_stopped()
2061 	 *           task_stopped_code()
2062 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2063 	 */
2064 	smp_wmb();
2065 
2066 	current->last_siginfo = info;
2067 	current->exit_code = exit_code;
2068 
2069 	/*
2070 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2071 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2072 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2073 	 * could be clear now.  We act as if SIGCONT is received after
2074 	 * TASK_TRACED is entered - ignore it.
2075 	 */
2076 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2077 		gstop_done = task_participate_group_stop(current);
2078 
2079 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2080 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2081 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2082 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2083 
2084 	/* entering a trap, clear TRAPPING */
2085 	task_clear_jobctl_trapping(current);
2086 
2087 	spin_unlock_irq(&current->sighand->siglock);
2088 	read_lock(&tasklist_lock);
2089 	if (may_ptrace_stop()) {
2090 		/*
2091 		 * Notify parents of the stop.
2092 		 *
2093 		 * While ptraced, there are two parents - the ptracer and
2094 		 * the real_parent of the group_leader.  The ptracer should
2095 		 * know about every stop while the real parent is only
2096 		 * interested in the completion of group stop.  The states
2097 		 * for the two don't interact with each other.  Notify
2098 		 * separately unless they're gonna be duplicates.
2099 		 */
2100 		do_notify_parent_cldstop(current, true, why);
2101 		if (gstop_done && ptrace_reparented(current))
2102 			do_notify_parent_cldstop(current, false, why);
2103 
2104 		/*
2105 		 * Don't want to allow preemption here, because
2106 		 * sys_ptrace() needs this task to be inactive.
2107 		 *
2108 		 * XXX: implement read_unlock_no_resched().
2109 		 */
2110 		preempt_disable();
2111 		read_unlock(&tasklist_lock);
2112 		preempt_enable_no_resched();
2113 		cgroup_enter_frozen();
2114 		freezable_schedule();
2115 	} else {
2116 		/*
2117 		 * By the time we got the lock, our tracer went away.
2118 		 * Don't drop the lock yet, another tracer may come.
2119 		 *
2120 		 * If @gstop_done, the ptracer went away between group stop
2121 		 * completion and here.  During detach, it would have set
2122 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2123 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2124 		 * the real parent of the group stop completion is enough.
2125 		 */
2126 		if (gstop_done)
2127 			do_notify_parent_cldstop(current, false, why);
2128 
2129 		/* tasklist protects us from ptrace_freeze_traced() */
2130 		__set_current_state(TASK_RUNNING);
2131 		if (clear_code)
2132 			current->exit_code = 0;
2133 		read_unlock(&tasklist_lock);
2134 	}
2135 
2136 	/*
2137 	 * We are back.  Now reacquire the siglock before touching
2138 	 * last_siginfo, so that we are sure to have synchronized with
2139 	 * any signal-sending on another CPU that wants to examine it.
2140 	 */
2141 	spin_lock_irq(&current->sighand->siglock);
2142 	current->last_siginfo = NULL;
2143 
2144 	/* LISTENING can be set only during STOP traps, clear it */
2145 	current->jobctl &= ~JOBCTL_LISTENING;
2146 
2147 	/*
2148 	 * Queued signals ignored us while we were stopped for tracing.
2149 	 * So check for any that we should take before resuming user mode.
2150 	 * This sets TIF_SIGPENDING, but never clears it.
2151 	 */
2152 	recalc_sigpending_tsk(current);
2153 }
2154 
2155 static void ptrace_do_notify(int signr, int exit_code, int why)
2156 {
2157 	kernel_siginfo_t info;
2158 
2159 	clear_siginfo(&info);
2160 	info.si_signo = signr;
2161 	info.si_code = exit_code;
2162 	info.si_pid = task_pid_vnr(current);
2163 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2164 
2165 	/* Let the debugger run.  */
2166 	ptrace_stop(exit_code, why, 1, &info);
2167 }
2168 
2169 void ptrace_notify(int exit_code)
2170 {
2171 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2172 	if (unlikely(current->task_works))
2173 		task_work_run();
2174 
2175 	spin_lock_irq(&current->sighand->siglock);
2176 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2177 	spin_unlock_irq(&current->sighand->siglock);
2178 }
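
/*
 * Editorial note: callers of ptrace_notify() pack a ptrace event into
 * the upper byte of exit_code, e.g. (a sketch; see the ptrace_event()
 * helper in include/linux/ptrace.h):
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 *
 * which is exactly what the BUG_ON() above enforces: the low seven
 * bits must be SIGTRAP and nothing above bit 15 may be set.
 */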
2179 
2180 /**
2181  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2182  * @signr: signr causing group stop if initiating
2183  *
2184  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2185  * and participate in it.  If already set, participate in the existing
2186  * group stop.  If participated in a group stop (and thus slept), %true is
2187  * returned with siglock released.
2188  *
2189  * If ptraced, this function doesn't handle stop itself.  Instead,
2190  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2191  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2192  * places afterwards.
2193  * place afterwards.
2194  * CONTEXT:
2195  * Must be called with @current->sighand->siglock held, which is released
2196  * on %true return.
2197  *
2198  * RETURNS:
2199  * %false if group stop is already cancelled or ptrace trap is scheduled.
2200  * %true if participated in group stop.
2201  */
2202 static bool do_signal_stop(int signr)
2203 	__releases(&current->sighand->siglock)
2204 {
2205 	struct signal_struct *sig = current->signal;
2206 
2207 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2208 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2209 		struct task_struct *t;
2210 
2211 		/* signr will be recorded in task->jobctl for retries */
2212 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2213 
2214 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2215 		    unlikely(signal_group_exit(sig)))
2216 			return false;
2217 		/*
2218 		 * There is no group stop already in progress.  We must
2219 		 * initiate one now.
2220 		 *
2221 		 * While ptraced, a task may be resumed while group stop is
2222 		 * still in effect and then receive a stop signal and
2223 		 * initiate another group stop.  This deviates from the
2224 		 * usual behavior as two consecutive stop signals can't
2225 		 * cause two group stops when !ptraced.  That is why we
2226 		 * also check !task_is_stopped(t) below.
2227 		 *
2228 		 * The condition can be distinguished by testing whether
2229 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2230 		 * group_exit_code in such case.
2231 		 *
2232 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2233 		 * an intervening stop signal is required to cause two
2234 		 * continued events regardless of ptrace.
2235 		 */
2236 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2237 			sig->group_exit_code = signr;
2238 
2239 		sig->group_stop_count = 0;
2240 
2241 		if (task_set_jobctl_pending(current, signr | gstop))
2242 			sig->group_stop_count++;
2243 
2244 		t = current;
2245 		while_each_thread(current, t) {
2246 			/*
2247 			 * Setting state to TASK_STOPPED for a group
2248 			 * stop is always done with the siglock held,
2249 			 * so this check has no races.
2250 			 */
2251 			if (!task_is_stopped(t) &&
2252 			    task_set_jobctl_pending(t, signr | gstop)) {
2253 				sig->group_stop_count++;
2254 				if (likely(!(t->ptrace & PT_SEIZED)))
2255 					signal_wake_up(t, 0);
2256 				else
2257 					ptrace_trap_notify(t);
2258 			}
2259 		}
2260 	}
2261 
2262 	if (likely(!current->ptrace)) {
2263 		int notify = 0;
2264 
2265 		/*
2266 		 * If there are no other threads in the group, or if there
2267 		 * is a group stop in progress and we are the last to stop,
2268 		 * report to the parent.
2269 		 */
2270 		if (task_participate_group_stop(current))
2271 			notify = CLD_STOPPED;
2272 
2273 		set_special_state(TASK_STOPPED);
2274 		spin_unlock_irq(&current->sighand->siglock);
2275 
2276 		/*
2277 		 * Notify the parent of the group stop completion.  Because
2278 		 * we're not holding either the siglock or tasklist_lock
2279 		 * here, ptracer may attach inbetween; however, this is for
2280 		 * here, ptracer may attach in between; however, this is for
2281 		 * parent of the group leader.  The new ptracer will get
2282 		 * its notification when this task transitions into
2283 		 * TASK_TRACED.
2284 		 */
2285 		if (notify) {
2286 			read_lock(&tasklist_lock);
2287 			do_notify_parent_cldstop(current, false, notify);
2288 			read_unlock(&tasklist_lock);
2289 		}
2290 
2291 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2292 		cgroup_enter_frozen();
2293 		freezable_schedule();
2294 		return true;
2295 	} else {
2296 		/*
2297 		 * While ptraced, group stop is handled by STOP trap.
2298 		 * Schedule it and let the caller deal with it.
2299 		 */
2300 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2301 		return false;
2302 	}
2303 }
2304 
2305 /**
2306  * do_jobctl_trap - take care of ptrace jobctl traps
2307  *
2308  * When PT_SEIZED, it's used for both group stop and explicit
2309  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2310  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2311  * the stop signal; otherwise, %SIGTRAP.
2312  *
2313  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2314  * number as exit_code and no siginfo.
2315  *
2316  * CONTEXT:
2317  * Must be called with @current->sighand->siglock held, which may be
2318  * released and re-acquired before returning with intervening sleep.
2319  */
2320 static void do_jobctl_trap(void)
2321 {
2322 	struct signal_struct *signal = current->signal;
2323 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2324 
2325 	if (current->ptrace & PT_SEIZED) {
2326 		if (!signal->group_stop_count &&
2327 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2328 			signr = SIGTRAP;
2329 		WARN_ON_ONCE(!signr);
2330 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2331 				 CLD_STOPPED);
2332 	} else {
2333 		WARN_ON_ONCE(!signr);
2334 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2335 		current->exit_code = 0;
2336 	}
2337 }
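
/*
 * Editorial note: on the tracer's side a PT_SEIZED group-stop surfaces
 * through waitpid() as an event stop (a sketch, per ptrace(2)):
 *
 *	waitpid(pid, &status, 0);
 *	if ((status >> 16) == PTRACE_EVENT_STOP)
 *		... group-stop or SEIZE/INTERRUPT stop ...
 *
 * with the stop signal (or SIGTRAP) in WSTOPSIG(status).
 */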
2338 
2339 /**
2340  * do_freezer_trap - handle the freezer jobctl trap
2341  *
2342  * Puts the task into the frozen state, unless the task is about to quit;
2343  * in that case JOBCTL_TRAP_FREEZE is dropped.
2344  *
2345  * CONTEXT:
2346  * Must be called with @current->sighand->siglock held,
2347  * which is always released before returning.
2348  */
2349 static void do_freezer_trap(void)
2350 	__releases(&current->sighand->siglock)
2351 {
2352 	/*
2353 	 * If there are trap bits pending other than JOBCTL_TRAP_FREEZE,
2354 	 * let's make another loop to give them a chance to be handled.
2355 	 * In any case, we'll come back here.
2356 	 */
2357 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2358 	     JOBCTL_TRAP_FREEZE) {
2359 		spin_unlock_irq(&current->sighand->siglock);
2360 		return;
2361 	}
2362 
2363 	/*
2364 	 * Now we're sure that there is no pending fatal signal and no
2365 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2366 	 * immediately (if there is a non-fatal signal pending), and
2367 	 * put the task into sleep.
2368 	 */
2369 	__set_current_state(TASK_INTERRUPTIBLE);
2370 	clear_thread_flag(TIF_SIGPENDING);
2371 	spin_unlock_irq(&current->sighand->siglock);
2372 	cgroup_enter_frozen();
2373 	freezable_schedule();
2374 }
2375 
2376 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2377 {
2378 	/*
2379 	 * We do not check sig_kernel_stop(signr) but set this marker
2380 	 * unconditionally because we do not know whether debugger will
2381 	 * change signr. This flag has no meaning unless we are going
2382 	 * to stop after return from ptrace_stop(). In this case it will
2383 	 * be checked in do_signal_stop(), we should only stop if it was
2384 	 * not cleared by SIGCONT while we were sleeping. See also the
2385 	 * comment in dequeue_signal().
2386 	 */
2387 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2388 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2389 
2390 	/* We're back.  Did the debugger cancel the sig?  */
2391 	signr = current->exit_code;
2392 	if (signr == 0)
2393 		return signr;
2394 
2395 	current->exit_code = 0;
2396 
2397 	/*
2398 	 * Update the siginfo structure if the signal has
2399 	 * changed.  If the debugger wanted something
2400 	 * specific in the siginfo structure then it should
2401 	 * have updated *info via PTRACE_SETSIGINFO.
2402 	 */
2403 	if (signr != info->si_signo) {
2404 		clear_siginfo(info);
2405 		info->si_signo = signr;
2406 		info->si_errno = 0;
2407 		info->si_code = SI_USER;
2408 		rcu_read_lock();
2409 		info->si_pid = task_pid_vnr(current->parent);
2410 		info->si_uid = from_kuid_munged(current_user_ns(),
2411 						task_uid(current->parent));
2412 		rcu_read_unlock();
2413 	}
2414 
2415 	/* If the (new) signal is now blocked, requeue it.  */
2416 	if (sigismember(&current->blocked, signr)) {
2417 		send_signal(signr, info, current, PIDTYPE_PID);
2418 		signr = 0;
2419 	}
2420 
2421 	return signr;
2422 }
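
/*
 * Editorial note: the cancel-or-replace decision read back from
 * ->exit_code above corresponds to the data argument the tracer passes
 * when resuming (a userspace sketch):
 *
 *	ptrace(PTRACE_CONT, pid, 0, 0);        - cancel the signal
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);  - deliver SIGTERM instead
 *
 * and PTRACE_SETSIGINFO is how the tracer updates the siginfo that
 * *info is compared against here.
 */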
2423 
2424 bool get_signal(struct ksignal *ksig)
2425 {
2426 	struct sighand_struct *sighand = current->sighand;
2427 	struct signal_struct *signal = current->signal;
2428 	int signr;
2429 
2430 	if (unlikely(current->task_works))
2431 		task_work_run();
2432 
2433 	if (unlikely(uprobe_deny_signal()))
2434 		return false;
2435 
2436 	/*
2437 	 * Do this once, we can't return to user-mode if freezing() == T.
2438 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2439 	 * thus do not need another check after return.
2440 	 */
2441 	try_to_freeze();
2442 
2443 relock:
2444 	spin_lock_irq(&sighand->siglock);
2445 	/*
2446 	 * Every stopped thread goes here after wakeup. Check to see if
2447 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2448 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2449 	 */
2450 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2451 		int why;
2452 
2453 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2454 			why = CLD_CONTINUED;
2455 		else
2456 			why = CLD_STOPPED;
2457 
2458 		signal->flags &= ~SIGNAL_CLD_MASK;
2459 
2460 		spin_unlock_irq(&sighand->siglock);
2461 
2462 		/*
2463 		 * Notify the parent that we're continuing.  This event is
2464 		 * always per-process and doesn't make a whole lot of sense
2465 		 * for ptracers, who shouldn't consume the state via
2466 		 * wait(2) either, but, for backward compatibility, notify
2467 		 * the ptracer of the group leader too unless it's gonna be
2468 		 * a duplicate.
2469 		 */
2470 		read_lock(&tasklist_lock);
2471 		do_notify_parent_cldstop(current, false, why);
2472 
2473 		if (ptrace_reparented(current->group_leader))
2474 			do_notify_parent_cldstop(current->group_leader,
2475 						true, why);
2476 		read_unlock(&tasklist_lock);
2477 
2478 		goto relock;
2479 	}
2480 
2481 	/* Has this task already been marked for death? */
2482 	if (signal_group_exit(signal)) {
2483 		ksig->info.si_signo = signr = SIGKILL;
2484 		sigdelset(&current->pending.signal, SIGKILL);
2485 		recalc_sigpending();
2486 		goto fatal;
2487 	}
2488 
2489 	for (;;) {
2490 		struct k_sigaction *ka;
2491 
2492 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2493 		    do_signal_stop(0))
2494 			goto relock;
2495 
2496 		if (unlikely(current->jobctl &
2497 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2498 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2499 				do_jobctl_trap();
2500 				spin_unlock_irq(&sighand->siglock);
2501 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2502 				do_freezer_trap();
2503 
2504 			goto relock;
2505 		}
2506 
2507 		/*
2508 		 * If the task is leaving the frozen state, let's update
2509 		 * cgroup counters and reset the frozen bit.
2510 		 */
2511 		if (unlikely(cgroup_task_frozen(current))) {
2512 			spin_unlock_irq(&sighand->siglock);
2513 			cgroup_leave_frozen(false);
2514 			goto relock;
2515 		}
2516 
2517 		/*
2518 		 * Signals generated by the execution of an instruction
2519 		 * need to be delivered before any other pending signals
2520 		 * so that the instruction pointer in the signal stack
2521 		 * frame points to the faulting instruction.
2522 		 */
2523 		signr = dequeue_synchronous_signal(&ksig->info);
2524 		if (!signr)
2525 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2526 
2527 		if (!signr)
2528 			break; /* will return 0 */
2529 
2530 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2531 			signr = ptrace_signal(signr, &ksig->info);
2532 			if (!signr)
2533 				continue;
2534 		}
2535 
2536 		ka = &sighand->action[signr-1];
2537 
2538 		/* Trace actually delivered signals. */
2539 		trace_signal_deliver(signr, &ksig->info, ka);
2540 
2541 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2542 			continue;
2543 		if (ka->sa.sa_handler != SIG_DFL) {
2544 			/* Run the handler.  */
2545 			ksig->ka = *ka;
2546 
2547 			if (ka->sa.sa_flags & SA_ONESHOT)
2548 				ka->sa.sa_handler = SIG_DFL;
2549 
2550 			break; /* will return non-zero "signr" value */
2551 		}
2552 
2553 		/*
2554 		 * Now we are doing the default action for this signal.
2555 		 */
2556 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2557 			continue;
2558 
2559 		/*
2560 		 * Global init gets no signals it doesn't want.
2561 		 * Container-init gets no signals it doesn't want from same
2562 		 * container.
2563 		 *
2564 		 * Note that if global/container-init sees a sig_kernel_only()
2565 		 * signal here, the signal must have been generated internally
2566 		 * or must have come from an ancestor namespace. In either
2567 		 * case, the signal cannot be dropped.
2568 		 */
2569 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2570 				!sig_kernel_only(signr))
2571 			continue;
2572 
2573 		if (sig_kernel_stop(signr)) {
2574 			/*
2575 			 * The default action is to stop all threads in
2576 			 * the thread group.  The job control signals
2577 			 * do nothing in an orphaned pgrp, but SIGSTOP
2578 			 * always works.  Note that siglock needs to be
2579 			 * dropped during the call to is_orphaned_pgrp()
2580 			 * because of lock ordering with tasklist_lock.
2581 			 * This allows an intervening SIGCONT to be posted.
2582 			 * We need to check for that and bail out if necessary.
2583 			 */
2584 			if (signr != SIGSTOP) {
2585 				spin_unlock_irq(&sighand->siglock);
2586 
2587 				/* signals can be posted during this window */
2588 
2589 				if (is_current_pgrp_orphaned())
2590 					goto relock;
2591 
2592 				spin_lock_irq(&sighand->siglock);
2593 			}
2594 
2595 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2596 				/* It released the siglock.  */
2597 				goto relock;
2598 			}
2599 
2600 			/*
2601 			 * We didn't actually stop, due to a race
2602 			 * with SIGCONT or something like that.
2603 			 */
2604 			continue;
2605 		}
2606 
2607 	fatal:
2608 		spin_unlock_irq(&sighand->siglock);
2609 		if (unlikely(cgroup_task_frozen(current)))
2610 			cgroup_leave_frozen(true);
2611 
2612 		/*
2613 		 * Anything else is fatal, maybe with a core dump.
2614 		 */
2615 		current->flags |= PF_SIGNALED;
2616 
2617 		if (sig_kernel_coredump(signr)) {
2618 			if (print_fatal_signals)
2619 				print_fatal_signal(ksig->info.si_signo);
2620 			proc_coredump_connector(current);
2621 			/*
2622 			 * If it was able to dump core, this kills all
2623 			 * other threads in the group and synchronizes with
2624 			 * their demise.  If we lost the race with another
2625 			 * thread getting here, it set group_exit_code
2626 			 * first and our do_group_exit call below will use
2627 			 * that value and ignore the one we pass it.
2628 			 */
2629 			do_coredump(&ksig->info);
2630 		}
2631 
2632 		/*
2633 		 * Death signals, no core dump.
2634 		 */
2635 		do_group_exit(ksig->info.si_signo);
2636 		/* NOTREACHED */
2637 	}
2638 	spin_unlock_irq(&sighand->siglock);
2639 
2640 	ksig->sig = signr;
2641 	return ksig->sig > 0;
2642 }
2643 
2644 /**
2645  * signal_delivered -
2646  * @ksig:		kernel signal struct
2647  * @stepping:		nonzero if debugger single-step or block-step in use
2648  *
2649  * This function should be called when a signal has successfully been
2650  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2651  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2652  * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
2653  */
2654 static void signal_delivered(struct ksignal *ksig, int stepping)
2655 {
2656 	sigset_t blocked;
2657 
2658 	/* A signal was successfully delivered, and the
2659 	   saved sigmask was stored on the signal frame,
2660 	   and will be restored by sigreturn.  So we can
2661 	   simply clear the restore sigmask flag.  */
2662 	clear_restore_sigmask();
2663 
2664 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2665 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2666 		sigaddset(&blocked, ksig->sig);
2667 	set_current_blocked(&blocked);
2668 	tracehook_signal_handler(stepping);
2669 }
2670 
2671 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2672 {
2673 	if (failed)
2674 		force_sigsegv(ksig->sig, current);
2675 	else
2676 		signal_delivered(ksig, stepping);
2677 }
2678 
2679 /*
2680  * It could be that complete_signal() picked us to notify about the
2681  * group-wide signal. Other threads should be notified now to take
2682  * the shared signals in @which since we will not.
2683  */
2684 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2685 {
2686 	sigset_t retarget;
2687 	struct task_struct *t;
2688 
2689 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2690 	if (sigisemptyset(&retarget))
2691 		return;
2692 
2693 	t = tsk;
2694 	while_each_thread(tsk, t) {
2695 		if (t->flags & PF_EXITING)
2696 			continue;
2697 
2698 		if (!has_pending_signals(&retarget, &t->blocked))
2699 			continue;
2700 		/* Remove the signals this thread can handle. */
2701 		sigandsets(&retarget, &retarget, &t->blocked);
2702 
2703 		if (!signal_pending(t))
2704 			signal_wake_up(t, 0);
2705 
2706 		if (sigisemptyset(&retarget))
2707 			break;
2708 	}
2709 }
2710 
2711 void exit_signals(struct task_struct *tsk)
2712 {
2713 	int group_stop = 0;
2714 	sigset_t unblocked;
2715 
2716 	/*
2717 	 * @tsk is about to have PF_EXITING set - lock out users which
2718 	 * expect stable threadgroup.
2719 	 */
2720 	cgroup_threadgroup_change_begin(tsk);
2721 
2722 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2723 		tsk->flags |= PF_EXITING;
2724 		cgroup_threadgroup_change_end(tsk);
2725 		return;
2726 	}
2727 
2728 	spin_lock_irq(&tsk->sighand->siglock);
2729 	/*
2730 	 * From now this task is not visible for group-wide signals,
2731 	 * see wants_signal(), do_signal_stop().
2732 	 */
2733 	tsk->flags |= PF_EXITING;
2734 
2735 	cgroup_threadgroup_change_end(tsk);
2736 
2737 	if (!signal_pending(tsk))
2738 		goto out;
2739 
2740 	unblocked = tsk->blocked;
2741 	signotset(&unblocked);
2742 	retarget_shared_pending(tsk, &unblocked);
2743 
2744 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2745 	    task_participate_group_stop(tsk))
2746 		group_stop = CLD_STOPPED;
2747 out:
2748 	spin_unlock_irq(&tsk->sighand->siglock);
2749 
2750 	/*
2751 	 * If group stop has completed, deliver the notification.  This
2752 	 * should always go to the real parent of the group leader.
2753 	 */
2754 	if (unlikely(group_stop)) {
2755 		read_lock(&tasklist_lock);
2756 		do_notify_parent_cldstop(tsk, false, group_stop);
2757 		read_unlock(&tasklist_lock);
2758 	}
2759 }
2760 
2761 /*
2762  * System call entry points.
2763  */
2764 
2765 /**
2766  *  sys_restart_syscall - restart a system call
2767  */
2768 SYSCALL_DEFINE0(restart_syscall)
2769 {
2770 	struct restart_block *restart = &current->restart_block;
2771 	return restart->fn(restart);
2772 }
2773 
2774 long do_no_restart_syscall(struct restart_block *param)
2775 {
2776 	return -EINTR;
2777 }
2778 
2779 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2780 {
2781 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2782 		sigset_t newblocked;
2783 		/* A set of now blocked but previously unblocked signals. */
2784 		sigandnsets(&newblocked, newset, &current->blocked);
2785 		retarget_shared_pending(tsk, &newblocked);
2786 	}
2787 	tsk->blocked = *newset;
2788 	recalc_sigpending();
2789 }
2790 
2791 /**
2792  * set_current_blocked - change current->blocked mask
2793  * @newset: new mask
2794  *
2795  * It is wrong to change ->blocked directly, this helper should be used
2796  * to ensure the process can't miss a shared signal we are going to block.
2797  */
2798 void set_current_blocked(sigset_t *newset)
2799 {
2800 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2801 	__set_current_blocked(newset);
2802 }
2803 
2804 void __set_current_blocked(const sigset_t *newset)
2805 {
2806 	struct task_struct *tsk = current;
2807 
2808 	/*
2809 	 * In case the signal mask hasn't changed, there is nothing we need
2810 	 * to do. The current->blocked shouldn't be modified by other task.
2811 	 */
2812 	if (sigequalsets(&tsk->blocked, newset))
2813 		return;
2814 
2815 	spin_lock_irq(&tsk->sighand->siglock);
2816 	__set_task_blocked(tsk, newset);
2817 	spin_unlock_irq(&tsk->sighand->siglock);
2818 }
2819 
2820 /*
2821  * This is also useful for kernel threads that want to temporarily
2822  * (or permanently) block certain signals.
2823  *
2824  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2825  * interface happily blocks "unblockable" signals like SIGKILL
2826  * and friends.
2827  */
2828 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2829 {
2830 	struct task_struct *tsk = current;
2831 	sigset_t newset;
2832 
2833 	/* Lockless, only current can change ->blocked, never from irq */
2834 	if (oldset)
2835 		*oldset = tsk->blocked;
2836 
2837 	switch (how) {
2838 	case SIG_BLOCK:
2839 		sigorsets(&newset, &tsk->blocked, set);
2840 		break;
2841 	case SIG_UNBLOCK:
2842 		sigandnsets(&newset, &tsk->blocked, set);
2843 		break;
2844 	case SIG_SETMASK:
2845 		newset = *set;
2846 		break;
2847 	default:
2848 		return -EINVAL;
2849 	}
2850 
2851 	__set_current_blocked(&newset);
2852 	return 0;
2853 }
2854 EXPORT_SYMBOL(sigprocmask);
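
/*
 * Editorial note: a minimal in-kernel usage sketch (hypothetical
 * caller, e.g. a kernel thread that wants to ignore everything):
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *
 * Unlike the sys_rt_sigprocmask() path below, nothing strips SIGKILL
 * and SIGSTOP from the set here.
 */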
2855 
2856 /*
2857  * This API helps set app-provided sigmasks.
2858  *
2859  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2860  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2861  */
2862 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2863 		     sigset_t *oldset, size_t sigsetsize)
2864 {
2865 	if (!usigmask)
2866 		return 0;
2867 
2868 	if (sigsetsize != sizeof(sigset_t))
2869 		return -EINVAL;
2870 	if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2871 		return -EFAULT;
2872 
2873 	*oldset = current->blocked;
2874 	set_current_blocked(set);
2875 
2876 	return 0;
2877 }
2878 EXPORT_SYMBOL(set_user_sigmask);
2879 
2880 #ifdef CONFIG_COMPAT
2881 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2882 			    sigset_t *set, sigset_t *oldset,
2883 			    size_t sigsetsize)
2884 {
2885 	if (!usigmask)
2886 		return 0;
2887 
2888 	if (sigsetsize != sizeof(compat_sigset_t))
2889 		return -EINVAL;
2890 	if (get_compat_sigset(set, usigmask))
2891 		return -EFAULT;
2892 
2893 	*oldset = current->blocked;
2894 	set_current_blocked(set);
2895 
2896 	return 0;
2897 }
2898 EXPORT_SYMBOL(set_compat_user_sigmask);
2899 #endif
2900 
2901 /*
2902  * restore_user_sigmask:
2903  * usigmask: sigmask passed in from userland.
2904  * sigsaved: saved sigmask when the syscall started and changed the sigmask to
2905  *           usigmask.
2906  *
2907  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2908  * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2909  */
2910 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2911 {
2913 	if (!usigmask)
2914 		return;
2915 	/*
2916 	 * When signals are pending, do not restore the saved sigmask here.
2917 	 * Restoring sigmask here can lead to delivering signals that the above
2918 	 * syscalls are intended to block because of the sigmask passed in.
2919 	 */
2920 	if (signal_pending(current)) {
2921 		current->saved_sigmask = *sigsaved;
2922 		set_restore_sigmask();
2923 		return;
2924 	}
2925 
2926 	/*
2927 	 * This is needed because the fast syscall return path does not restore
2928 	 * saved_sigmask when signals are not pending.
2929 	 */
2930 	set_current_blocked(sigsaved);
2931 }
2932 EXPORT_SYMBOL(restore_user_sigmask);
2933 
2934 /**
2935  *  sys_rt_sigprocmask - change the list of currently blocked signals
2936  *  @how: whether to add, remove, or set signals
2937  *  @nset: new set of blocked signals, or NULL
2938  *  @oset: previous value of signal mask if non-null
2939  *  @sigsetsize: size of sigset_t type
2940  */
2941 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2942 		sigset_t __user *, oset, size_t, sigsetsize)
2943 {
2944 	sigset_t old_set, new_set;
2945 	int error;
2946 
2947 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2948 	if (sigsetsize != sizeof(sigset_t))
2949 		return -EINVAL;
2950 
2951 	old_set = current->blocked;
2952 
2953 	if (nset) {
2954 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2955 			return -EFAULT;
2956 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2957 
2958 		error = sigprocmask(how, &new_set, NULL);
2959 		if (error)
2960 			return error;
2961 	}
2962 
2963 	if (oset) {
2964 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2965 			return -EFAULT;
2966 	}
2967 
2968 	return 0;
2969 }
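
/*
 * Editorial note: userspace reaches this through the libc sigprocmask()
 * wrapper, e.g. (a sketch):
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *
 * Note that the kernel silently drops SIGKILL/SIGSTOP from @nset above
 * rather than returning an error.
 */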
2970 
2971 #ifdef CONFIG_COMPAT
2972 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2973 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2974 {
2975 	sigset_t old_set = current->blocked;
2976 
2977 	/* XXX: Don't preclude handling different sized sigset_t's.  */
2978 	if (sigsetsize != sizeof(sigset_t))
2979 		return -EINVAL;
2980 
2981 	if (nset) {
2982 		sigset_t new_set;
2983 		int error;
2984 		if (get_compat_sigset(&new_set, nset))
2985 			return -EFAULT;
2986 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2987 
2988 		error = sigprocmask(how, &new_set, NULL);
2989 		if (error)
2990 			return error;
2991 	}
2992 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2993 }
2994 #endif
2995 
2996 static void do_sigpending(sigset_t *set)
2997 {
2998 	spin_lock_irq(&current->sighand->siglock);
2999 	sigorsets(set, &current->pending.signal,
3000 		  &current->signal->shared_pending.signal);
3001 	spin_unlock_irq(&current->sighand->siglock);
3002 
3003 	/* Outside the lock because only this thread touches it.  */
3004 	sigandsets(set, &current->blocked, set);
3005 }
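
/*
 * Editorial note: the result is the intersection of pending (private or
 * shared) and blocked signals, i.e. exactly the set that sigpending(2)
 * reports to userspace.
 */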
3006 
3007 /**
3008  *  sys_rt_sigpending - examine a pending signal that has been raised
3009  *			while blocked
3010  *  @uset: stores pending signals
3011  *  @sigsetsize: size of sigset_t type or larger
3012  */
3013 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3014 {
3015 	sigset_t set;
3016 
3017 	if (sigsetsize > sizeof(*uset))
3018 		return -EINVAL;
3019 
3020 	do_sigpending(&set);
3021 
3022 	if (copy_to_user(uset, &set, sigsetsize))
3023 		return -EFAULT;
3024 
3025 	return 0;
3026 }
3027 
3028 #ifdef CONFIG_COMPAT
3029 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3030 		compat_size_t, sigsetsize)
3031 {
3032 	sigset_t set;
3033 
3034 	if (sigsetsize > sizeof(*uset))
3035 		return -EINVAL;
3036 
3037 	do_sigpending(&set);
3038 
3039 	return put_compat_sigset(uset, &set, sigsetsize);
3040 }
3041 #endif
3042 
3043 static const struct {
3044 	unsigned char limit, layout;
3045 } sig_sicodes[] = {
3046 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3047 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3048 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3049 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3050 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3051 #if defined(SIGEMT)
3052 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3053 #endif
3054 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3055 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3056 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3057 };
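
/*
 * Editorial note: for each signal with signal-specific si_codes, the
 * table above records how many codes are valid (limit) and which
 * siginfo layout they use; signals not listed fall back to the generic
 * classification in siginfo_layout() below.
 */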
3058 
3059 static bool known_siginfo_layout(unsigned sig, int si_code)
3060 {
3061 	if (si_code == SI_KERNEL)
3062 		return true;
3063 	else if (si_code > SI_USER) {
3064 		if (sig_specific_sicodes(sig)) {
3065 			if (si_code <= sig_sicodes[sig].limit)
3066 				return true;
3067 		}
3068 		else if (si_code <= NSIGPOLL)
3069 			return true;
3070 	}
3071 	else if (si_code >= SI_DETHREAD)
3072 		return true;
3073 	else if (si_code == SI_ASYNCNL)
3074 		return true;
3075 	return false;
3076 }
3077 
3078 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3079 {
3080 	enum siginfo_layout layout = SIL_KILL;
3081 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3082 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3083 		    (si_code <= sig_sicodes[sig].limit)) {
3084 			layout = sig_sicodes[sig].layout;
3085 			/* Handle the exceptions */
3086 			if ((sig == SIGBUS) &&
3087 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3088 				layout = SIL_FAULT_MCEERR;
3089 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3090 				layout = SIL_FAULT_BNDERR;
3091 #ifdef SEGV_PKUERR
3092 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3093 				layout = SIL_FAULT_PKUERR;
3094 #endif
3095 		}
3096 		else if (si_code <= NSIGPOLL)
3097 			layout = SIL_POLL;
3098 	} else {
3099 		if (si_code == SI_TIMER)
3100 			layout = SIL_TIMER;
3101 		else if (si_code == SI_SIGIO)
3102 			layout = SIL_POLL;
3103 		else if (si_code < 0)
3104 			layout = SIL_RT;
3105 	}
3106 	return layout;
3107 }
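
/*
 * Editorial note: a few concrete mappings, for orientation:
 *
 *	SIGSEGV, SEGV_MAPERR   ->  SIL_FAULT
 *	SIGSEGV, SEGV_BNDERR   ->  SIL_FAULT_BNDERR
 *	SIGCHLD, CLD_EXITED    ->  SIL_CHLD
 *	any sig, SI_QUEUE (<0) ->  SIL_RT
 *	any sig, SI_USER       ->  SIL_KILL (the default)
 */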
3108 
3109 static inline char __user *si_expansion(const siginfo_t __user *info)
3110 {
3111 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3112 }
3113 
3114 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3115 {
3116 	char __user *expansion = si_expansion(to);
3117 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3118 		return -EFAULT;
3119 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3120 		return -EFAULT;
3121 	return 0;
3122 }
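
/*
 * Editorial note: userspace siginfo_t is a fixed 128 bytes while
 * struct kernel_siginfo only covers the fields the kernel actually
 * generates; SI_EXPANSION_SIZE is the difference, and zeroing it above
 * keeps the unused tail of the user's buffer well defined.
 */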
3123 
3124 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3125 				       const siginfo_t __user *from)
3126 {
3127 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3128 		char __user *expansion = si_expansion(from);
3129 		char buf[SI_EXPANSION_SIZE];
3130 		int i;
3131 		/*
3132 		 * An unknown si_code might need more than
3133 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3134 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3135 		 * will return this data to userspace exactly.
3136 		 */
3137 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3138 			return -EFAULT;
3139 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3140 			if (buf[i] != 0)
3141 				return -E2BIG;
3142 		}
3143 	}
3144 	return 0;
3145 }
3146 
3147 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3148 				    const siginfo_t __user *from)
3149 {
3150 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3151 		return -EFAULT;
3152 	to->si_signo = signo;
3153 	return post_copy_siginfo_from_user(to, from);
3154 }
3155 
3156 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3157 {
3158 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3159 		return -EFAULT;
3160 	return post_copy_siginfo_from_user(to, from);
3161 }
3162 
3163 #ifdef CONFIG_COMPAT
3164 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3165 			   const struct kernel_siginfo *from)
3166 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3167 {
3168 	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3169 }
3170 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3171 			     const struct kernel_siginfo *from, bool x32_ABI)
3172 #endif
3173 {
3174 	struct compat_siginfo new;
3175 	memset(&new, 0, sizeof(new));
3176 
3177 	new.si_signo = from->si_signo;
3178 	new.si_errno = from->si_errno;
3179 	new.si_code  = from->si_code;
3180 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3181 	case SIL_KILL:
3182 		new.si_pid = from->si_pid;
3183 		new.si_uid = from->si_uid;
3184 		break;
3185 	case SIL_TIMER:
3186 		new.si_tid     = from->si_tid;
3187 		new.si_overrun = from->si_overrun;
3188 		new.si_int     = from->si_int;
3189 		break;
3190 	case SIL_POLL:
3191 		new.si_band = from->si_band;
3192 		new.si_fd   = from->si_fd;
3193 		break;
3194 	case SIL_FAULT:
3195 		new.si_addr = ptr_to_compat(from->si_addr);
3196 #ifdef __ARCH_SI_TRAPNO
3197 		new.si_trapno = from->si_trapno;
3198 #endif
3199 		break;
3200 	case SIL_FAULT_MCEERR:
3201 		new.si_addr = ptr_to_compat(from->si_addr);
3202 #ifdef __ARCH_SI_TRAPNO
3203 		new.si_trapno = from->si_trapno;
3204 #endif
3205 		new.si_addr_lsb = from->si_addr_lsb;
3206 		break;
3207 	case SIL_FAULT_BNDERR:
3208 		new.si_addr = ptr_to_compat(from->si_addr);
3209 #ifdef __ARCH_SI_TRAPNO
3210 		new.si_trapno = from->si_trapno;
3211 #endif
3212 		new.si_lower = ptr_to_compat(from->si_lower);
3213 		new.si_upper = ptr_to_compat(from->si_upper);
3214 		break;
3215 	case SIL_FAULT_PKUERR:
3216 		new.si_addr = ptr_to_compat(from->si_addr);
3217 #ifdef __ARCH_SI_TRAPNO
3218 		new.si_trapno = from->si_trapno;
3219 #endif
3220 		new.si_pkey = from->si_pkey;
3221 		break;
3222 	case SIL_CHLD:
3223 		new.si_pid    = from->si_pid;
3224 		new.si_uid    = from->si_uid;
3225 		new.si_status = from->si_status;
3226 #ifdef CONFIG_X86_X32_ABI
3227 		if (x32_ABI) {
3228 			new._sifields._sigchld_x32._utime = from->si_utime;
3229 			new._sifields._sigchld_x32._stime = from->si_stime;
3230 		} else
3231 #endif
3232 		{
3233 			new.si_utime = from->si_utime;
3234 			new.si_stime = from->si_stime;
3235 		}
3236 		break;
3237 	case SIL_RT:
3238 		new.si_pid = from->si_pid;
3239 		new.si_uid = from->si_uid;
3240 		new.si_int = from->si_int;
3241 		break;
3242 	case SIL_SYS:
3243 		new.si_call_addr = ptr_to_compat(from->si_call_addr);
3244 		new.si_syscall   = from->si_syscall;
3245 		new.si_arch      = from->si_arch;
3246 		break;
3247 	}
3248 
3249 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3250 		return -EFAULT;
3251 
3252 	return 0;
3253 }
3254 
3255 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3256 					 const struct compat_siginfo *from)
3257 {
3258 	clear_siginfo(to);
3259 	to->si_signo = from->si_signo;
3260 	to->si_errno = from->si_errno;
3261 	to->si_code  = from->si_code;
3262 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3263 	case SIL_KILL:
3264 		to->si_pid = from->si_pid;
3265 		to->si_uid = from->si_uid;
3266 		break;
3267 	case SIL_TIMER:
3268 		to->si_tid     = from->si_tid;
3269 		to->si_overrun = from->si_overrun;
3270 		to->si_int     = from->si_int;
3271 		break;
3272 	case SIL_POLL:
3273 		to->si_band = from->si_band;
3274 		to->si_fd   = from->si_fd;
3275 		break;
3276 	case SIL_FAULT:
3277 		to->si_addr = compat_ptr(from->si_addr);
3278 #ifdef __ARCH_SI_TRAPNO
3279 		to->si_trapno = from->si_trapno;
3280 #endif
3281 		break;
3282 	case SIL_FAULT_MCEERR:
3283 		to->si_addr = compat_ptr(from->si_addr);
3284 #ifdef __ARCH_SI_TRAPNO
3285 		to->si_trapno = from->si_trapno;
3286 #endif
3287 		to->si_addr_lsb = from->si_addr_lsb;
3288 		break;
3289 	case SIL_FAULT_BNDERR:
3290 		to->si_addr = compat_ptr(from->si_addr);
3291 #ifdef __ARCH_SI_TRAPNO
3292 		to->si_trapno = from->si_trapno;
3293 #endif
3294 		to->si_lower = compat_ptr(from->si_lower);
3295 		to->si_upper = compat_ptr(from->si_upper);
3296 		break;
3297 	case SIL_FAULT_PKUERR:
3298 		to->si_addr = compat_ptr(from->si_addr);
3299 #ifdef __ARCH_SI_TRAPNO
3300 		to->si_trapno = from->si_trapno;
3301 #endif
3302 		to->si_pkey = from->si_pkey;
3303 		break;
3304 	case SIL_CHLD:
3305 		to->si_pid    = from->si_pid;
3306 		to->si_uid    = from->si_uid;
3307 		to->si_status = from->si_status;
3308 #ifdef CONFIG_X86_X32_ABI
3309 		if (in_x32_syscall()) {
3310 			to->si_utime = from->_sifields._sigchld_x32._utime;
3311 			to->si_stime = from->_sifields._sigchld_x32._stime;
3312 		} else
3313 #endif
3314 		{
3315 			to->si_utime = from->si_utime;
3316 			to->si_stime = from->si_stime;
3317 		}
3318 		break;
3319 	case SIL_RT:
3320 		to->si_pid = from->si_pid;
3321 		to->si_uid = from->si_uid;
3322 		to->si_int = from->si_int;
3323 		break;
3324 	case SIL_SYS:
3325 		to->si_call_addr = compat_ptr(from->si_call_addr);
3326 		to->si_syscall   = from->si_syscall;
3327 		to->si_arch      = from->si_arch;
3328 		break;
3329 	}
3330 	return 0;
3331 }
3332 
3333 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3334 				      const struct compat_siginfo __user *ufrom)
3335 {
3336 	struct compat_siginfo from;
3337 
3338 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3339 		return -EFAULT;
3340 
3341 	from.si_signo = signo;
3342 	return post_copy_siginfo_from_user32(to, &from);
3343 }
3344 
3345 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3346 			     const struct compat_siginfo __user *ufrom)
3347 {
3348 	struct compat_siginfo from;
3349 
3350 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3351 		return -EFAULT;
3352 
3353 	return post_copy_siginfo_from_user32(to, &from);
3354 }
3355 #endif /* CONFIG_COMPAT */
3356 
3357 /**
3358  *  do_sigtimedwait - wait for queued signals specified in @which
3359  *  @which: queued signals to wait for
3360  *  @info: if non-null, the signal's siginfo is returned here
3361  *  @ts: upper bound on process time suspension
3362  */
3363 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3364 		    const struct timespec64 *ts)
3365 {
3366 	ktime_t *to = NULL, timeout = KTIME_MAX;
3367 	struct task_struct *tsk = current;
3368 	sigset_t mask = *which;
3369 	int sig, ret = 0;
3370 
3371 	if (ts) {
3372 		if (!timespec64_valid(ts))
3373 			return -EINVAL;
3374 		timeout = timespec64_to_ktime(*ts);
3375 		to = &timeout;
3376 	}
3377 
3378 	/*
3379 	 * Invert the set of allowed signals to get those we want to block.
3380 	 */
3381 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3382 	signotset(&mask);
3383 
3384 	spin_lock_irq(&tsk->sighand->siglock);
3385 	sig = dequeue_signal(tsk, &mask, info);
3386 	if (!sig && timeout) {
3387 		/*
3388 		 * None ready, temporarily unblock those we're interested
3389 		 * while we are sleeping in so that we'll be awakened when
3390 		 * they arrive. Unblocking is always fine, we can avoid
3391 		 * set_current_blocked().
3392 		 */
3393 		tsk->real_blocked = tsk->blocked;
3394 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3395 		recalc_sigpending();
3396 		spin_unlock_irq(&tsk->sighand->siglock);
3397 
3398 		__set_current_state(TASK_INTERRUPTIBLE);
3399 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3400 							 HRTIMER_MODE_REL);
3401 		spin_lock_irq(&tsk->sighand->siglock);
3402 		__set_task_blocked(tsk, &tsk->real_blocked);
3403 		sigemptyset(&tsk->real_blocked);
3404 		sig = dequeue_signal(tsk, &mask, info);
3405 	}
3406 	spin_unlock_irq(&tsk->sighand->siglock);
3407 
3408 	if (sig)
3409 		return sig;
3410 	return ret ? -EINTR : -EAGAIN;
3411 }
3412 
3413 /**
3414  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3415  *			in @uthese
3416  *  @uthese: queued signals to wait for
3417  *  @uinfo: if non-null, the signal's siginfo is returned here
3418  *  @uts: upper bound on process time suspension
3419  *  @sigsetsize: size of sigset_t type
3420  */
3421 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3422 		siginfo_t __user *, uinfo,
3423 		const struct __kernel_timespec __user *, uts,
3424 		size_t, sigsetsize)
3425 {
3426 	sigset_t these;
3427 	struct timespec64 ts;
3428 	kernel_siginfo_t info;
3429 	int ret;
3430 
3431 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3432 	if (sigsetsize != sizeof(sigset_t))
3433 		return -EINVAL;
3434 
3435 	if (copy_from_user(&these, uthese, sizeof(these)))
3436 		return -EFAULT;
3437 
3438 	if (uts) {
3439 		if (get_timespec64(&ts, uts))
3440 			return -EFAULT;
3441 	}
3442 
3443 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3444 
3445 	if (ret > 0 && uinfo) {
3446 		if (copy_siginfo_to_user(uinfo, &info))
3447 			ret = -EFAULT;
3448 	}
3449 
3450 	return ret;
3451 }
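
/*
 * Editorial note: typical userspace usage blocks the signals first and
 * then waits for them synchronously (a sketch; error handling omitted):
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	pthread_sigmask(SIG_BLOCK, &set, NULL);
 *	sigtimedwait(&set, &si, &ts);
 *
 * A timeout surfaces as -1 with errno == EAGAIN, matching the -EAGAIN
 * return from do_sigtimedwait() above.
 */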
3452 
3453 #ifdef CONFIG_COMPAT_32BIT_TIME
3454 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3455 		siginfo_t __user *, uinfo,
3456 		const struct old_timespec32 __user *, uts,
3457 		size_t, sigsetsize)
3458 {
3459 	sigset_t these;
3460 	struct timespec64 ts;
3461 	kernel_siginfo_t info;
3462 	int ret;
3463 
3464 	if (sigsetsize != sizeof(sigset_t))
3465 		return -EINVAL;
3466 
3467 	if (copy_from_user(&these, uthese, sizeof(these)))
3468 		return -EFAULT;
3469 
3470 	if (uts) {
3471 		if (get_old_timespec32(&ts, uts))
3472 			return -EFAULT;
3473 	}
3474 
3475 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3476 
3477 	if (ret > 0 && uinfo) {
3478 		if (copy_siginfo_to_user(uinfo, &info))
3479 			ret = -EFAULT;
3480 	}
3481 
3482 	return ret;
3483 }
3484 #endif
3485 
3486 #ifdef CONFIG_COMPAT
3487 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3488 		struct compat_siginfo __user *, uinfo,
3489 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3490 {
3491 	sigset_t s;
3492 	struct timespec64 t;
3493 	kernel_siginfo_t info;
3494 	long ret;
3495 
3496 	if (sigsetsize != sizeof(sigset_t))
3497 		return -EINVAL;
3498 
3499 	if (get_compat_sigset(&s, uthese))
3500 		return -EFAULT;
3501 
3502 	if (uts) {
3503 		if (get_timespec64(&t, uts))
3504 			return -EFAULT;
3505 	}
3506 
3507 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3508 
3509 	if (ret > 0 && uinfo) {
3510 		if (copy_siginfo_to_user32(uinfo, &info))
3511 			ret = -EFAULT;
3512 	}
3513 
3514 	return ret;
3515 }
3516 
3517 #ifdef CONFIG_COMPAT_32BIT_TIME
3518 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3519 		struct compat_siginfo __user *, uinfo,
3520 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3521 {
3522 	sigset_t s;
3523 	struct timespec64 t;
3524 	kernel_siginfo_t info;
3525 	long ret;
3526 
3527 	if (sigsetsize != sizeof(sigset_t))
3528 		return -EINVAL;
3529 
3530 	if (get_compat_sigset(&s, uthese))
3531 		return -EFAULT;
3532 
3533 	if (uts) {
3534 		if (get_old_timespec32(&t, uts))
3535 			return -EFAULT;
3536 	}
3537 
3538 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3539 
3540 	if (ret > 0 && uinfo) {
3541 		if (copy_siginfo_to_user32(uinfo, &info))
3542 			ret = -EFAULT;
3543 	}
3544 
3545 	return ret;
3546 }
3547 #endif
3548 #endif
3549 
3550 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3551 {
3552 	clear_siginfo(info);
3553 	info->si_signo = sig;
3554 	info->si_errno = 0;
3555 	info->si_code = SI_USER;
3556 	info->si_pid = task_tgid_vnr(current);
3557 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3558 }
3559 
3560 /**
3561  *  sys_kill - send a signal to a process
3562  *  @pid: the PID of the process
3563  *  @sig: signal to be sent
3564  */
3565 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3566 {
3567 	struct kernel_siginfo info;
3568 
3569 	prepare_kill_siginfo(sig, &info);
3570 
3571 	return kill_something_info(sig, &info, pid);
3572 }
3573 
3574 /*
3575  * Verify that the signaler and signalee either are in the same pid namespace
3576  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3577  * namespace.
3578  */
3579 static bool access_pidfd_pidns(struct pid *pid)
3580 {
3581 	struct pid_namespace *active = task_active_pid_ns(current);
3582 	struct pid_namespace *p = ns_of_pid(pid);
3583 
3584 	for (;;) {
3585 		if (!p)
3586 			return false;
3587 		if (p == active)
3588 			break;
3589 		p = p->parent;
3590 	}
3591 
3592 	return true;
3593 }
3594 
3595 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3596 {
3597 #ifdef CONFIG_COMPAT
3598 	/*
3599 	 * Avoid hooking up compat syscalls and instead handle necessary
3600 	 * conversions here. Note, this is a stop-gap measure and should not be
3601 	 * considered a generic solution.
3602 	 */
3603 	if (in_compat_syscall())
3604 		return copy_siginfo_from_user32(
3605 			kinfo, (struct compat_siginfo __user *)info);
3606 #endif
3607 	return copy_siginfo_from_user(kinfo, info);
3608 }
3609 
3610 static struct pid *pidfd_to_pid(const struct file *file)
3611 {
3612 	if (file->f_op == &pidfd_fops)
3613 		return file->private_data;
3614 
3615 	return tgid_pidfd_to_pid(file);
3616 }
3617 
3618 /**
3619  * sys_pidfd_send_signal - send a signal to a process through a task file
3620  *                          descriptor
3621  * @pidfd:  the file descriptor of the process
3622  * @sig:    signal to be sent
3623  * @info:   the signal info
3624  * @flags:  future flags to be passed
3625  *
3626  * The syscall currently only signals via PIDTYPE_PID which covers
3627  * kill(<positive-pid>, <signal>). It does not signal threads or process
3628  * groups.
3629  * In order to extend the syscall to threads and process groups the @flags
3630  * argument should be used. In essence, the @flags argument will determine
3631  * what is signaled and not the file descriptor itself. Put in other words,
3632  * grouping is a property of the flags argument not a property of the file
3633  * descriptor.
3634  *
3635  * Return: 0 on success, negative errno on failure
3636  */
3637 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3638 		siginfo_t __user *, info, unsigned int, flags)
3639 {
3640 	int ret;
3641 	struct fd f;
3642 	struct pid *pid;
3643 	kernel_siginfo_t kinfo;
3644 
3645 	/* Enforce that @flags is 0 until an extension is added. */
3646 	if (flags)
3647 		return -EINVAL;
3648 
3649 	f = fdget(pidfd);
3650 	if (!f.file)
3651 		return -EBADF;
3652 
3653 	/* Is this a pidfd? */
3654 	pid = pidfd_to_pid(f.file);
3655 	if (IS_ERR(pid)) {
3656 		ret = PTR_ERR(pid);
3657 		goto err;
3658 	}
3659 
3660 	ret = -EINVAL;
3661 	if (!access_pidfd_pidns(pid))
3662 		goto err;
3663 
3664 	if (info) {
3665 		ret = copy_siginfo_from_user_any(&kinfo, info);
3666 		if (unlikely(ret))
3667 			goto err;
3668 
3669 		ret = -EINVAL;
3670 		if (unlikely(sig != kinfo.si_signo))
3671 			goto err;
3672 
3673 		/* Only allow sending arbitrary signals to yourself. */
3674 		ret = -EPERM;
3675 		if ((task_pid(current) != pid) &&
3676 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3677 			goto err;
3678 	} else {
3679 		prepare_kill_siginfo(sig, &kinfo);
3680 	}
3681 
3682 	ret = kill_pid_info(sig, &kinfo, pid);
3683 
3684 err:
3685 	fdput(f);
3686 	return ret;
3687 }
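
/*
 * Illustrative userspace sketch (not part of this file): signalling through
 * a pidfd. At this revision a pidfd can be obtained by opening a /proc/<pid>
 * directory; the raw syscall number is used since no libc wrapper is
 * assumed. The pid below is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int pidfd = open("/proc/1234", O_DIRECTORY | O_CLOEXEC);
 *
 *		if (pidfd < 0)
 *			return 1;
 *		// info == NULL: the kernel fills in SI_USER data, as with kill(2)
 *		if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
 *			perror("pidfd_send_signal");
 *		close(pidfd);
 *		return 0;
 *	}
 */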
3688 
3689 static int
3690 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3691 {
3692 	struct task_struct *p;
3693 	int error = -ESRCH;
3694 
3695 	rcu_read_lock();
3696 	p = find_task_by_vpid(pid);
3697 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3698 		error = check_kill_permission(sig, info, p);
3699 		/*
3700 		 * The null signal is a permissions and process existence
3701 		 * probe.  No signal is actually delivered.
3702 		 */
3703 		if (!error && sig) {
3704 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3705 			/*
3706 			 * If lock_task_sighand() failed we pretend the task
3707 			 * dies after receiving the signal. The window is tiny,
3708 			 * and the signal is private anyway.
3709 			 */
3710 			if (unlikely(error == -ESRCH))
3711 				error = 0;
3712 		}
3713 	}
3714 	rcu_read_unlock();
3715 
3716 	return error;
3717 }
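
/*
 * Illustrative userspace sketch of the null-signal probe described above:
 * tgkill() with sig == 0 runs only the permission and existence checks.
 * A raw syscall is used; tgid and tid are caller-supplied.
 *
 *	#include <errno.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	// Does thread `tid` still exist in thread group `tgid`?
 *	static int thread_exists(pid_t tgid, pid_t tid)
 *	{
 *		if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
 *			return 1;
 *		return errno == EPERM;	// exists, but we may not signal it
 *	}
 */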
3718 
3719 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3720 {
3721 	struct kernel_siginfo info;
3722 
3723 	clear_siginfo(&info);
3724 	info.si_signo = sig;
3725 	info.si_errno = 0;
3726 	info.si_code = SI_TKILL;
3727 	info.si_pid = task_tgid_vnr(current);
3728 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3729 
3730 	return do_send_specific(tgid, pid, sig, &info);
3731 }
3732 
3733 /**
3734  *  sys_tgkill - send signal to one specific thread
3735  *  @tgid: the thread group ID of the thread
3736  *  @pid: the PID of the thread
3737  *  @sig: signal to be sent
3738  *
3739  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3740  *  exists but no longer belongs to the target process. This check solves
3741  *  the problem of threads exiting and their PIDs being reused.
3742  */
3743 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3744 {
3745 	/* This is only valid for single tasks */
3746 	if (pid <= 0 || tgid <= 0)
3747 		return -EINVAL;
3748 
3749 	return do_tkill(tgid, pid, sig);
3750 }
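
/*
 * Illustrative userspace contrast (not part of this file): plain tkill()
 * can misdirect a signal if the TID has been recycled into another process,
 * while tgkill() fails with -ESRCH as described above. The tgid/tid values
 * are hypothetical.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tkill, tid, SIGUSR1);		// racy across TID reuse
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);	// rejected once tid leaves tgid
 */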
3751 
3752 /**
3753  *  sys_tkill - send signal to one specific task
3754  *  @pid: the PID of the task
3755  *  @sig: signal to be sent
3756  *
3757  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3758  */
3759 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3760 {
3761 	/* This is only valid for single tasks */
3762 	if (pid <= 0)
3763 		return -EINVAL;
3764 
3765 	return do_tkill(0, pid, sig);
3766 }
3767 
3768 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3769 {
3770 	/* Not even root can pretend to send signals from the kernel.
3771 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3772 	 */
3773 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3774 	    (task_pid_vnr(current) != pid))
3775 		return -EPERM;
3776 
3777 	/* POSIX.1b doesn't mention process groups.  */
3778 	return kill_proc_info(sig, info, pid);
3779 }
3780 
3781 /**
3782  *  sys_rt_sigqueueinfo - send signal information to a process
3783  *  @pid: the PID of the process
3784  *  @sig: signal to be sent
3785  *  @uinfo: signal info to be sent
3786  */
3787 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3788 		siginfo_t __user *, uinfo)
3789 {
3790 	kernel_siginfo_t info;
3791 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3792 	if (unlikely(ret))
3793 		return ret;
3794 	return do_rt_sigqueueinfo(pid, sig, &info);
3795 }
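
/*
 * Illustrative userspace sketch (not part of this file): the usual caller
 * is the POSIX sigqueue(3) wrapper, which fills in si_code = SI_QUEUE (a
 * negative value, so the -EPERM check above does not fire) plus a payload.
 * target_pid and the payload are hypothetical.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	if (sigqueue(target_pid, SIGRTMIN, val) == -1)
 *		perror("sigqueue");
 */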
3796 
3797 #ifdef CONFIG_COMPAT
3798 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3799 			compat_pid_t, pid,
3800 			int, sig,
3801 			struct compat_siginfo __user *, uinfo)
3802 {
3803 	kernel_siginfo_t info;
3804 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3805 	if (unlikely(ret))
3806 		return ret;
3807 	return do_rt_sigqueueinfo(pid, sig, &info);
3808 }
3809 #endif
3810 
3811 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3812 {
3813 	/* This is only valid for single tasks */
3814 	if (pid <= 0 || tgid <= 0)
3815 		return -EINVAL;
3816 
3817 	/* Not even root can pretend to send signals from the kernel.
3818 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3819 	 */
3820 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3821 	    (task_pid_vnr(current) != pid))
3822 		return -EPERM;
3823 
3824 	return do_send_specific(tgid, pid, sig, info);
3825 }
3826 
3827 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3828 		siginfo_t __user *, uinfo)
3829 {
3830 	kernel_siginfo_t info;
3831 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3832 	if (unlikely(ret))
3833 		return ret;
3834 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3835 }
3836 
3837 #ifdef CONFIG_COMPAT
3838 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3839 			compat_pid_t, tgid,
3840 			compat_pid_t, pid,
3841 			int, sig,
3842 			struct compat_siginfo __user *, uinfo)
3843 {
3844 	kernel_siginfo_t info;
3845 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3846 	if (unlikely(ret))
3847 		return ret;
3848 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3849 }
3850 #endif
3851 
3852 /*
3853  * For kthreads only; must not be used if the thread was cloned with CLONE_SIGHAND.
3854  */
3855 void kernel_sigaction(int sig, __sighandler_t action)
3856 {
3857 	spin_lock_irq(&current->sighand->siglock);
3858 	current->sighand->action[sig - 1].sa.sa_handler = action;
3859 	if (action == SIG_IGN) {
3860 		sigset_t mask;
3861 
3862 		sigemptyset(&mask);
3863 		sigaddset(&mask, sig);
3864 
3865 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3866 		flush_sigqueue_mask(&mask, &current->pending);
3867 		recalc_sigpending();
3868 	}
3869 	spin_unlock_irq(&current->sighand->siglock);
3870 }
3871 EXPORT_SYMBOL(kernel_sigaction);
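
/*
 * In-kernel usage sketch (hedged): kthreads normally reach kernel_sigaction()
 * through the allow_signal()/disallow_signal() helpers declared in
 * <linux/signal.h>. A minimal kthread function that opts in to SIGTERM;
 * my_kthread is hypothetical.
 *
 *	static int my_kthread(void *data)
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current))
 *				flush_signals(current);
 *		}
 *		return 0;
 *	}
 */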
3872 
3873 void __weak sigaction_compat_abi(struct k_sigaction *act,
3874 		struct k_sigaction *oact)
3875 {
3876 }
3877 
3878 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3879 {
3880 	struct task_struct *p = current, *t;
3881 	struct k_sigaction *k;
3882 	sigset_t mask;
3883 
3884 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3885 		return -EINVAL;
3886 
3887 	k = &p->sighand->action[sig-1];
3888 
3889 	spin_lock_irq(&p->sighand->siglock);
3890 	if (oact)
3891 		*oact = *k;
3892 
3893 	sigaction_compat_abi(act, oact);
3894 
3895 	if (act) {
3896 		sigdelsetmask(&act->sa.sa_mask,
3897 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
3898 		*k = *act;
3899 		/*
3900 		 * POSIX 3.3.1.3:
3901 		 *  "Setting a signal action to SIG_IGN for a signal that is
3902 		 *   pending shall cause the pending signal to be discarded,
3903 		 *   whether or not it is blocked."
3904 		 *
3905 		 *  "Setting a signal action to SIG_DFL for a signal that is
3906 		 *   pending and whose default action is to ignore the signal
3907 		 *   (for example, SIGCHLD), shall cause the pending signal to
3908 		 *   be discarded, whether or not it is blocked"
3909 		 */
3910 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3911 			sigemptyset(&mask);
3912 			sigaddset(&mask, sig);
3913 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3914 			for_each_thread(p, t)
3915 				flush_sigqueue_mask(&mask, &t->pending);
3916 		}
3917 	}
3918 
3919 	spin_unlock_irq(&p->sighand->siglock);
3920 	return 0;
3921 }
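
/*
 * Illustrative userspace sketch of the POSIX rule quoted above: a blocked,
 * pending signal is discarded the moment its disposition becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// block SIGUSR1
 *	raise(SIGUSR1);				// now pending
 *	signal(SIGUSR1, SIG_IGN);		// pending instance is discarded
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// nothing is delivered
 */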
3922 
3923 static int
3924 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
3925 		size_t min_ss_size)
3926 {
3927 	struct task_struct *t = current;
3928 
3929 	if (oss) {
3930 		memset(oss, 0, sizeof(stack_t));
3931 		oss->ss_sp = (void __user *) t->sas_ss_sp;
3932 		oss->ss_size = t->sas_ss_size;
3933 		oss->ss_flags = sas_ss_flags(sp) |
3934 			(current->sas_ss_flags & SS_FLAG_BITS);
3935 	}
3936 
3937 	if (ss) {
3938 		void __user *ss_sp = ss->ss_sp;
3939 		size_t ss_size = ss->ss_size;
3940 		unsigned ss_flags = ss->ss_flags;
3941 		int ss_mode;
3942 
3943 		if (unlikely(on_sig_stack(sp)))
3944 			return -EPERM;
3945 
3946 		ss_mode = ss_flags & ~SS_FLAG_BITS;
3947 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3948 				ss_mode != 0))
3949 			return -EINVAL;
3950 
3951 		if (ss_mode == SS_DISABLE) {
3952 			ss_size = 0;
3953 			ss_sp = NULL;
3954 		} else {
3955 			if (unlikely(ss_size < min_ss_size))
3956 				return -ENOMEM;
3957 		}
3958 
3959 		t->sas_ss_sp = (unsigned long) ss_sp;
3960 		t->sas_ss_size = ss_size;
3961 		t->sas_ss_flags = ss_flags;
3962 	}
3963 	return 0;
3964 }
3965 
3966 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3967 {
3968 	stack_t new, old;
3969 	int err;
3970 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3971 		return -EFAULT;
3972 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3973 			      current_user_stack_pointer(),
3974 			      MINSIGSTKSZ);
3975 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3976 		err = -EFAULT;
3977 	return err;
3978 }
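
/*
 * Illustrative userspace sketch (not part of this file): installing an
 * alternate stack so a SIGSEGV handler can still run after the main stack
 * overflows; segv_handler is hypothetical.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { .ss_size = SIGSTKSZ, .ss_flags = 0 };
 *	struct sigaction sa = { .sa_flags = SA_ONSTACK };
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	if (ss.ss_sp && sigaltstack(&ss, NULL) == 0) {
 *		sa.sa_handler = segv_handler;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */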
3979 
3980 int restore_altstack(const stack_t __user *uss)
3981 {
3982 	stack_t new;
3983 	if (copy_from_user(&new, uss, sizeof(stack_t)))
3984 		return -EFAULT;
3985 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3986 			     MINSIGSTKSZ);
3987 	/* squash all but EFAULT for now */
3988 	return 0;
3989 }
3990 
3991 int __save_altstack(stack_t __user *uss, unsigned long sp)
3992 {
3993 	struct task_struct *t = current;
3994 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3995 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
3996 		__put_user(t->sas_ss_size, &uss->ss_size);
3997 	if (err)
3998 		return err;
3999 	if (t->sas_ss_flags & SS_AUTODISARM)
4000 		sas_ss_reset(t);
4001 	return 0;
4002 }
4003 
4004 #ifdef CONFIG_COMPAT
4005 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4006 				 compat_stack_t __user *uoss_ptr)
4007 {
4008 	stack_t uss, uoss;
4009 	int ret;
4010 
4011 	if (uss_ptr) {
4012 		compat_stack_t uss32;
4013 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4014 			return -EFAULT;
4015 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4016 		uss.ss_flags = uss32.ss_flags;
4017 		uss.ss_size = uss32.ss_size;
4018 	}
4019 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4020 			     compat_user_stack_pointer(),
4021 			     COMPAT_MINSIGSTKSZ);
4022 	if (ret >= 0 && uoss_ptr)  {
4023 		compat_stack_t old;
4024 		memset(&old, 0, sizeof(old));
4025 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4026 		old.ss_flags = uoss.ss_flags;
4027 		old.ss_size = uoss.ss_size;
4028 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4029 			ret = -EFAULT;
4030 	}
4031 	return ret;
4032 }
4033 
4034 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4035 			const compat_stack_t __user *, uss_ptr,
4036 			compat_stack_t __user *, uoss_ptr)
4037 {
4038 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4039 }
4040 
4041 int compat_restore_altstack(const compat_stack_t __user *uss)
4042 {
4043 	int err = do_compat_sigaltstack(uss, NULL);
4044 	/* squash all but -EFAULT for now */
4045 	return err == -EFAULT ? err : 0;
4046 }
4047 
4048 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4049 {
4050 	int err;
4051 	struct task_struct *t = current;
4052 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4053 			 &uss->ss_sp) |
4054 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4055 		__put_user(t->sas_ss_size, &uss->ss_size);
4056 	if (err)
4057 		return err;
4058 	if (t->sas_ss_flags & SS_AUTODISARM)
4059 		sas_ss_reset(t);
4060 	return 0;
4061 }
4062 #endif
4063 
4064 #ifdef __ARCH_WANT_SYS_SIGPENDING
4065 
4066 /**
4067  *  sys_sigpending - examine pending signals
4068  *  @uset: where the mask of pending signals is returned
4069  */
4070 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4071 {
4072 	sigset_t set;
4073 
4074 	if (sizeof(old_sigset_t) > sizeof(*uset))
4075 		return -EINVAL;
4076 
4077 	do_sigpending(&set);
4078 
4079 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4080 		return -EFAULT;
4081 
4082 	return 0;
4083 }
4084 
4085 #ifdef CONFIG_COMPAT
4086 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4087 {
4088 	sigset_t set;
4089 
4090 	do_sigpending(&set);
4091 
4092 	return put_user(set.sig[0], set32);
4093 }
4094 #endif
4095 
4096 #endif
4097 
4098 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4099 /**
4100  *  sys_sigprocmask - examine and change blocked signals
4101  *  @how: whether to add, remove, or set signals
4102  *  @nset: signals to add or remove (if non-null)
4103  *  @oset: previous value of signal mask if non-null
4104  *
4105  * Some platforms have their own version with special arguments;
4106  * others support only sys_rt_sigprocmask.
4107  */
4108 
4109 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4110 		old_sigset_t __user *, oset)
4111 {
4112 	old_sigset_t old_set, new_set;
4113 	sigset_t new_blocked;
4114 
4115 	old_set = current->blocked.sig[0];
4116 
4117 	if (nset) {
4118 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4119 			return -EFAULT;
4120 
4121 		new_blocked = current->blocked;
4122 
4123 		switch (how) {
4124 		case SIG_BLOCK:
4125 			sigaddsetmask(&new_blocked, new_set);
4126 			break;
4127 		case SIG_UNBLOCK:
4128 			sigdelsetmask(&new_blocked, new_set);
4129 			break;
4130 		case SIG_SETMASK:
4131 			new_blocked.sig[0] = new_set;
4132 			break;
4133 		default:
4134 			return -EINVAL;
4135 		}
4136 
4137 		set_current_blocked(&new_blocked);
4138 	}
4139 
4140 	if (oset) {
4141 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4142 			return -EFAULT;
4143 	}
4144 
4145 	return 0;
4146 }
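
/*
 * Illustrative userspace sketch of the three @how modes (modern libcs
 * actually issue rt_sigprocmask, but the semantics are identical):
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// add SIGINT to the mask
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// remove it again
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the saved mask
 */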
4147 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4148 
4149 #ifndef CONFIG_ODD_RT_SIGACTION
4150 /**
4151  *  sys_rt_sigaction - alter an action taken by a process
4152  *  @sig: signal whose action is to be changed
4153  *  @act: new sigaction
4154  *  @oact: used to save the previous sigaction
4155  *  @sigsetsize: size of sigset_t type
4156  */
4157 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4158 		const struct sigaction __user *, act,
4159 		struct sigaction __user *, oact,
4160 		size_t, sigsetsize)
4161 {
4162 	struct k_sigaction new_sa, old_sa;
4163 	int ret;
4164 
4165 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4166 	if (sigsetsize != sizeof(sigset_t))
4167 		return -EINVAL;
4168 
4169 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4170 		return -EFAULT;
4171 
4172 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4173 	if (ret)
4174 		return ret;
4175 
4176 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4177 		return -EFAULT;
4178 
4179 	return 0;
4180 }
4181 #ifdef CONFIG_COMPAT
4182 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4183 		const struct compat_sigaction __user *, act,
4184 		struct compat_sigaction __user *, oact,
4185 		compat_size_t, sigsetsize)
4186 {
4187 	struct k_sigaction new_ka, old_ka;
4188 #ifdef __ARCH_HAS_SA_RESTORER
4189 	compat_uptr_t restorer;
4190 #endif
4191 	int ret;
4192 
4193 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4194 	if (sigsetsize != sizeof(compat_sigset_t))
4195 		return -EINVAL;
4196 
4197 	if (act) {
4198 		compat_uptr_t handler;
4199 		ret = get_user(handler, &act->sa_handler);
4200 		new_ka.sa.sa_handler = compat_ptr(handler);
4201 #ifdef __ARCH_HAS_SA_RESTORER
4202 		ret |= get_user(restorer, &act->sa_restorer);
4203 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4204 #endif
4205 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4206 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4207 		if (ret)
4208 			return -EFAULT;
4209 	}
4210 
4211 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4212 	if (!ret && oact) {
4213 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4214 			       &oact->sa_handler);
4215 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4216 					 sizeof(oact->sa_mask));
4217 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4218 #ifdef __ARCH_HAS_SA_RESTORER
4219 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4220 				&oact->sa_restorer);
4221 #endif
4222 	}
4223 	return ret;
4224 }
4225 #endif
4226 #endif /* !CONFIG_ODD_RT_SIGACTION */
4227 
4228 #ifdef CONFIG_OLD_SIGACTION
4229 SYSCALL_DEFINE3(sigaction, int, sig,
4230 		const struct old_sigaction __user *, act,
4231 	        struct old_sigaction __user *, oact)
4232 {
4233 	struct k_sigaction new_ka, old_ka;
4234 	int ret;
4235 
4236 	if (act) {
4237 		old_sigset_t mask;
4238 		if (!access_ok(act, sizeof(*act)) ||
4239 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4240 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4241 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4242 		    __get_user(mask, &act->sa_mask))
4243 			return -EFAULT;
4244 #ifdef __ARCH_HAS_KA_RESTORER
4245 		new_ka.ka_restorer = NULL;
4246 #endif
4247 		siginitset(&new_ka.sa.sa_mask, mask);
4248 	}
4249 
4250 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4251 
4252 	if (!ret && oact) {
4253 		if (!access_ok(oact, sizeof(*oact)) ||
4254 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4255 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4256 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4257 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4258 			return -EFAULT;
4259 	}
4260 
4261 	return ret;
4262 }
4263 #endif
4264 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4265 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4266 		const struct compat_old_sigaction __user *, act,
4267 	        struct compat_old_sigaction __user *, oact)
4268 {
4269 	struct k_sigaction new_ka, old_ka;
4270 	int ret;
4271 	compat_old_sigset_t mask;
4272 	compat_uptr_t handler, restorer;
4273 
4274 	if (act) {
4275 		if (!access_ok(act, sizeof(*act)) ||
4276 		    __get_user(handler, &act->sa_handler) ||
4277 		    __get_user(restorer, &act->sa_restorer) ||
4278 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4279 		    __get_user(mask, &act->sa_mask))
4280 			return -EFAULT;
4281 
4282 #ifdef __ARCH_HAS_KA_RESTORER
4283 		new_ka.ka_restorer = NULL;
4284 #endif
4285 		new_ka.sa.sa_handler = compat_ptr(handler);
4286 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4287 		siginitset(&new_ka.sa.sa_mask, mask);
4288 	}
4289 
4290 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4291 
4292 	if (!ret && oact) {
4293 		if (!access_ok(oact, sizeof(*oact)) ||
4294 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4295 			       &oact->sa_handler) ||
4296 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4297 			       &oact->sa_restorer) ||
4298 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4299 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4300 			return -EFAULT;
4301 	}
4302 	return ret;
4303 }
4304 #endif
4305 
4306 #ifdef CONFIG_SGETMASK_SYSCALL
4307 
4308 /*
4309  * For backwards compatibility.  Functionality superseded by sigprocmask.
4310  */
4311 SYSCALL_DEFINE0(sgetmask)
4312 {
4313 	/* SMP safe */
4314 	return current->blocked.sig[0];
4315 }
4316 
4317 SYSCALL_DEFINE1(ssetmask, int, newmask)
4318 {
4319 	int old = current->blocked.sig[0];
4320 	sigset_t newset;
4321 
4322 	siginitset(&newset, newmask);
4323 	set_current_blocked(&newset);
4324 
4325 	return old;
4326 }
4327 #endif /* CONFIG_SGETMASK_SYSCALL */
4328 
4329 #ifdef __ARCH_WANT_SYS_SIGNAL
4330 /*
4331  * For backwards compatibility.  Functionality superseded by sigaction.
4332  */
4333 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4334 {
4335 	struct k_sigaction new_sa, old_sa;
4336 	int ret;
4337 
4338 	new_sa.sa.sa_handler = handler;
4339 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4340 	sigemptyset(&new_sa.sa.sa_mask);
4341 
4342 	ret = do_sigaction(sig, &new_sa, &old_sa);
4343 
4344 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4345 }
4346 #endif /* __ARCH_WANT_SYS_SIGNAL */
4347 
4348 #ifdef __ARCH_WANT_SYS_PAUSE
4349 
4350 SYSCALL_DEFINE0(pause)
4351 {
4352 	while (!signal_pending(current)) {
4353 		__set_current_state(TASK_INTERRUPTIBLE);
4354 		schedule();
4355 	}
4356 	return -ERESTARTNOHAND;
4357 }
4358 
4359 #endif
4360 
4361 static int sigsuspend(sigset_t *set)
4362 {
4363 	current->saved_sigmask = current->blocked;
4364 	set_current_blocked(set);
4365 
4366 	while (!signal_pending(current)) {
4367 		__set_current_state(TASK_INTERRUPTIBLE);
4368 		schedule();
4369 	}
4370 	set_restore_sigmask();
4371 	return -ERESTARTNOHAND;
4372 }
4373 
4374 /**
4375  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4376  *	until a signal is received
4377  *  @unewset: new signal mask value
4378  *  @sigsetsize: size of sigset_t type
4379  */
4380 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4381 {
4382 	sigset_t newset;
4383 
4384 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4385 	if (sigsetsize != sizeof(sigset_t))
4386 		return -EINVAL;
4387 
4388 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4389 		return -EFAULT;
4390 	return sigsuspend(&newset);
4391 }
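
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait that sigsuspend() enables. The mask swap and the sleep
 * are atomic in the kernel, so a signal arriving between the flag check
 * and the wait cannot be lost; got_usr1 is a hypothetical flag set by a
 * SIGUSR1 handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, orig;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	while (!got_usr1)
 *		sigsuspend(&orig);	// unblock SIGUSR1 and sleep atomically
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */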
4392 
4393 #ifdef CONFIG_COMPAT
4394 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4395 {
4396 	sigset_t newset;
4397 
4398 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4399 	if (sigsetsize != sizeof(sigset_t))
4400 		return -EINVAL;
4401 
4402 	if (get_compat_sigset(&newset, unewset))
4403 		return -EFAULT;
4404 	return sigsuspend(&newset);
4405 }
4406 #endif
4407 
4408 #ifdef CONFIG_OLD_SIGSUSPEND
4409 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4410 {
4411 	sigset_t blocked;
4412 	siginitset(&blocked, mask);
4413 	return sigsuspend(&blocked);
4414 }
4415 #endif
4416 #ifdef CONFIG_OLD_SIGSUSPEND3
4417 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4418 {
4419 	sigset_t blocked;
4420 	siginitset(&blocked, mask);
4421 	return sigsuspend(&blocked);
4422 }
4423 #endif
4424 
4425 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4426 {
4427 	return NULL;
4428 }
4429 
4430 static inline void siginfo_buildtime_checks(void)
4431 {
4432 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4433 
4434 	/* Verify the offsets in the two siginfos match */
4435 #define CHECK_OFFSET(field) \
4436 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4437 
4438 	/* kill */
4439 	CHECK_OFFSET(si_pid);
4440 	CHECK_OFFSET(si_uid);
4441 
4442 	/* timer */
4443 	CHECK_OFFSET(si_tid);
4444 	CHECK_OFFSET(si_overrun);
4445 	CHECK_OFFSET(si_value);
4446 
4447 	/* rt */
4448 	CHECK_OFFSET(si_pid);
4449 	CHECK_OFFSET(si_uid);
4450 	CHECK_OFFSET(si_value);
4451 
4452 	/* sigchld */
4453 	CHECK_OFFSET(si_pid);
4454 	CHECK_OFFSET(si_uid);
4455 	CHECK_OFFSET(si_status);
4456 	CHECK_OFFSET(si_utime);
4457 	CHECK_OFFSET(si_stime);
4458 
4459 	/* sigfault */
4460 	CHECK_OFFSET(si_addr);
4461 	CHECK_OFFSET(si_addr_lsb);
4462 	CHECK_OFFSET(si_lower);
4463 	CHECK_OFFSET(si_upper);
4464 	CHECK_OFFSET(si_pkey);
4465 
4466 	/* sigpoll */
4467 	CHECK_OFFSET(si_band);
4468 	CHECK_OFFSET(si_fd);
4469 
4470 	/* sigsys */
4471 	CHECK_OFFSET(si_call_addr);
4472 	CHECK_OFFSET(si_syscall);
4473 	CHECK_OFFSET(si_arch);
4474 #undef CHECK_OFFSET
4475 }
4476 
4477 void __init signals_init(void)
4478 {
4479 	siginfo_buildtime_checks();
4480 
4481 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4482 }
4483 
4484 #ifdef CONFIG_KGDB_KDB
4485 #include <linux/kdb.h>
4486 /*
4487  * kdb_send_sig - Allows kdb to send signals without exposing
4488  * signal internals.  This function checks if the required locks are
4489  * available before calling the main signal code, to avoid kdb
4490  * deadlocks.
4491  */
4492 void kdb_send_sig(struct task_struct *t, int sig)
4493 {
4494 	static struct task_struct *kdb_prev_t;
4495 	int new_t, ret;
4496 	if (!spin_trylock(&t->sighand->siglock)) {
4497 		kdb_printf("Can't do kill command now.\n"
4498 			   "The sigmask lock is held somewhere else in "
4499 			   "kernel, try again later\n");
4500 		return;
4501 	}
4502 	new_t = kdb_prev_t != t;
4503 	kdb_prev_t = t;
4504 	if (t->state != TASK_RUNNING && new_t) {
4505 		spin_unlock(&t->sighand->siglock);
4506 		kdb_printf("Process is not RUNNING, sending a signal from "
4507 			   "kdb risks deadlock\n"
4508 			   "on the run queue locks. "
4509 			   "The signal has _not_ been sent.\n"
4510 			   "Reissue the kill command if you want to risk "
4511 			   "the deadlock.\n");
4512 		return;
4513 	}
4514 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4515 	spin_unlock(&t->sighand->siglock);
4516 	if (ret)
4517 		kdb_printf("Fail to deliver Signal %d to process %d.\n",
4518 			   sig, t->pid);
4519 	else
4520 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4521 }
4522 #endif	/* CONFIG_KGDB_KDB */
4523