// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
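
/*
 * Illustrative sketch (not compiled in): with both SIGUSR1 and SIGSEGV
 * pending and nothing blocked, next_signal() reports the synchronous
 * SIGSEGV first, per SYNCHRONOUS_MASK above.  The function below is a
 * hypothetical, documentation-only example, not part of this file's API.
 */
#if 0
static int next_signal_example(void)
{
	struct sigpending pending;
	sigset_t blocked;

	INIT_LIST_HEAD(&pending.list);
	sigemptyset(&pending.signal);
	sigaddset(&pending.signal, SIGUSR1);
	sigaddset(&pending.signal, SIGSEGV);
	sigemptyset(&blocked);

	/* Returns SIGSEGV, not SIGUSR1 */
	return next_signal(&pending, &blocked);
}
#endif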

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if it became a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
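
/*
 * Sketch of a typical caller: do_signal_stop() packs the stop signo into
 * the low JOBCTL_STOP_SIGMASK bits together with the pending/consume
 * flags.  Hypothetical, documentation-only example.
 */
#if 0
static void start_group_stop(struct task_struct *t, int signr)
{
	task_set_jobctl_pending(t, (unsigned long)signr |
				   JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME);
}
#endif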

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an ongoing signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * An itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
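
/*
 * Sketch of the required locking around dequeue_signal(), per the
 * comment above; roughly what a kernel thread servicing its own
 * signals would do.  Documentation-only example, not compiled in.
 */
#if 0
static int dequeue_one_signal(kernel_siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;
}
#endif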

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state(), we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
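
/*
 * For reference, the wrappers in <linux/sched/signal.h> reduce to
 * (sketch):
 *
 *	signal_wake_up(t, resume)
 *		-> signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0)
 *	ptrace_signal_wake_up(t, resume)
 *		-> signal_wake_up_state(t, resume ? __TASK_TRACED : 0)
 */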

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by a user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after ongoing forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or have ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64-bit big-endian kernel
 * and a 32-bit userspace.  The 32-bit address will be encoded in the
 * low 32 bits of the pointer, and those low 32 bits will be stored at
 * a higher address than they would appear at in a 32-bit pointer.  So
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32-bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
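
/*
 * Sketch of the convention described above: a caller that knows the
 * completing task runs a 32-bit userspace stores the pointer in
 * sival_int rather than sival_ptr.  Hypothetical, documentation-only
 * example ("compat_addr_in_use" is an invented flag, not a real API).
 */
#if 0
static int complete_async_io(struct pid *pid, const struct cred *cred,
			     void __user *uaddr, bool compat_addr_in_use)
{
	sigval_t addr;

	if (compat_addr_in_use)
		addr.sival_int = (int)(unsigned long)uaddr;
	else
		addr.sival_ptr = uaddr;

	return kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}
#endif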

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
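
/*
 * Sketch of the priv convention: priv == 0 passes SEND_SIG_NOINFO (the
 * signal looks user-sent), non-zero passes SEND_SIG_PRIV (kernel
 * generated, see send_signal() above).  Hypothetical, documentation-only
 * example.
 */
#if 0
static void nudge_task(struct task_struct *p)
{
	send_sig(SIGHUP, p, 0);		/* behaves like a user-sent signal */
	send_sig(SIGTERM, p, 1);	/* kernel-generated */
}
#endif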

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
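
/*
 * Sketch: an architecture's page-fault handler would typically report a
 * bad user access like this.  Documentation-only example, not compiled
 * in; the optional ___ARCH_SI_* arguments are omitted.
 */
#if 0
static void report_user_segv(unsigned long address)
{
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
}
#endif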

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
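
/*
 * Sketch: delivering SIGINT to a foreground process group, roughly what
 * the tty layer does for ^C (see __kill_pgrp_info() above).
 * Hypothetical, documentation-only example.
 */
#if 0
static void interrupt_foreground_pgrp(struct pid *pgrp)
{
	kill_pgrp(pgrp, SIGINT, 1);	/* priv: sent on the kernel's behalf */
}
#endif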

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create().  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
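
/*
 * Sketch of the preallocation pattern described above, as POSIX timers
 * use it: allocate at timer_create() time so that expiry can never fail
 * with EAGAIN.  Hypothetical, documentation-only example.
 */
#if 0
static struct sigqueue *example_timer_sigqueue(void)
{
	struct sigqueue *q = sigqueue_alloc();	/* at timer_create() time */

	if (!q)
		return NULL;	/* report -EAGAIN from timer_create() */

	clear_siginfo(&q->info);
	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;
	/* on expiry: send_sigqueue(q, pid, PIDTYPE_TGID); */
	/* on deletion: sigqueue_free(q); */
	return q;
}
#endif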

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
1870 		BUG_ON(q->info.si_code != SI_TIMER);
1871 		q->info.si_overrun++;
1872 		result = TRACE_SIGNAL_ALREADY_PENDING;
1873 		goto out;
1874 	}
1875 	q->info.si_overrun = 0;
1876 
1877 	signalfd_notify(t, sig);
1878 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1879 	list_add_tail(&q->list, &pending->list);
1880 	sigaddset(&pending->signal, sig);
1881 	complete_signal(sig, t, type);
1882 	result = TRACE_SIGNAL_DELIVERED;
1883 out:
1884 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1885 	unlock_task_sighand(t, &flags);
1886 ret:
1887 	rcu_read_unlock();
1888 	return ret;
1889 }
1890 
1891 static void do_notify_pidfd(struct task_struct *task)
1892 {
1893 	struct pid *pid;
1894 
1895 	WARN_ON(task->exit_state == 0);
1896 	pid = task_pid(task);
1897 	wake_up_all(&pid->wait_pidfd);
1898 }
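/*
 * Userspace counterpart (sketch): a pidfd obtained via pidfd_open(2) or
 * clone(2) with CLONE_PIDFD becomes readable once the task has exited,
 * because the wake_up_all() above wakes the pidfd poll waiters:
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns with POLLIN set after exit
 */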
1899 
1900 /*
1901  * Let a parent know about the death of a child.
1902  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1903  *
1904  * Returns true if our parent ignored us and so we've switched to
1905  * self-reaping.
1906  */
1907 bool do_notify_parent(struct task_struct *tsk, int sig)
1908 {
1909 	struct kernel_siginfo info;
1910 	unsigned long flags;
1911 	struct sighand_struct *psig;
1912 	bool autoreap = false;
1913 	u64 utime, stime;
1914 
1915 	BUG_ON(sig == -1);
1916 
1917 	/* do_notify_parent_cldstop should have been called instead.  */
1918 	BUG_ON(task_is_stopped_or_traced(tsk));
1919 
1920 	BUG_ON(!tsk->ptrace &&
1921 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1922 
1923 	/* Wake up all pidfd waiters */
1924 	do_notify_pidfd(tsk);
1925 
1926 	if (sig != SIGCHLD) {
1927 		/*
1928 		 * This is only possible if parent == real_parent.
1929 		 * Check if it has changed security domain.
1930 		 */
1931 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1932 			sig = SIGCHLD;
1933 	}
1934 
1935 	clear_siginfo(&info);
1936 	info.si_signo = sig;
1937 	info.si_errno = 0;
1938 	/*
1939 	 * We are under tasklist_lock here so our parent is tied to
1940 	 * us and cannot change.
1941 	 *
1942 	 * task_active_pid_ns will always return the same pid namespace
1943 	 * until a task passes through release_task.
1944 	 *
1945 	 * write_lock() currently calls preempt_disable() which is the
1946 	 * same as rcu_read_lock(), but according to Oleg it is not
1947 	 * correct to rely on this.
1948 	 */
1949 	rcu_read_lock();
1950 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1951 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1952 				       task_uid(tsk));
1953 	rcu_read_unlock();
1954 
1955 	task_cputime(tsk, &utime, &stime);
1956 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1957 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1958 
1959 	info.si_status = tsk->exit_code & 0x7f;
1960 	if (tsk->exit_code & 0x80)
1961 		info.si_code = CLD_DUMPED;
1962 	else if (tsk->exit_code & 0x7f)
1963 		info.si_code = CLD_KILLED;
1964 	else {
1965 		info.si_code = CLD_EXITED;
1966 		info.si_status = tsk->exit_code >> 8;
1967 	}
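	/*
	 * Example decodings, assuming the usual wait(2) status encoding:
	 *   exit_code 0x0b (SIGSEGV)        -> CLD_KILLED, si_status = 11
	 *   exit_code 0x8b (SIGSEGV | 0x80) -> CLD_DUMPED, si_status = 11
	 *   exit_code 0x0100 (exit(1))      -> CLD_EXITED, si_status = 1
	 */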
1968 
1969 	psig = tsk->parent->sighand;
1970 	spin_lock_irqsave(&psig->siglock, flags);
1971 	if (!tsk->ptrace && sig == SIGCHLD &&
1972 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1973 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1974 		/*
1975 		 * We are exiting and our parent doesn't care.  POSIX.1
1976 		 * defines special semantics for setting SIGCHLD to SIG_IGN
1977 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
1978 		 * automatically and not left for our parent's wait4 call.
1979 		 * Rather than having the parent do it as a magic kind of
1980 		 * signal handler, we just set this to tell do_exit that we
1981 		 * can be cleaned up without becoming a zombie.  Note that
1982 		 * we still call __wake_up_parent in this case, because a
1983 		 * blocked sys_wait4 might now return -ECHILD.
1984 		 *
1985 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1986 		 * is implementation-defined: we do (if you don't want
1987 		 * it, just use SIG_IGN instead).
1988 		 */
1989 		autoreap = true;
1990 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1991 			sig = 0;
1992 	}
1993 	/*
1994 	 * Send with __send_signal as si_pid and si_uid are in the
1995 	 * parent's namespaces.
1996 	 */
1997 	if (valid_signal(sig) && sig)
1998 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
1999 	__wake_up_parent(tsk, tsk->parent);
2000 	spin_unlock_irqrestore(&psig->siglock, flags);
2001 
2002 	return autoreap;
2003 }
2004 
2005 /**
2006  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2007  * @tsk: task reporting the state change
2008  * @for_ptracer: the notification is for ptracer
2009  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2010  *
2011  * Notify @tsk's parent that the stopped/continued state has changed.  If
2012  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2013  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2014  *
2015  * CONTEXT:
2016  * Must be called with tasklist_lock at least read locked.
2017  */
2018 static void do_notify_parent_cldstop(struct task_struct *tsk,
2019 				     bool for_ptracer, int why)
2020 {
2021 	struct kernel_siginfo info;
2022 	unsigned long flags;
2023 	struct task_struct *parent;
2024 	struct sighand_struct *sighand;
2025 	u64 utime, stime;
2026 
2027 	if (for_ptracer) {
2028 		parent = tsk->parent;
2029 	} else {
2030 		tsk = tsk->group_leader;
2031 		parent = tsk->real_parent;
2032 	}
2033 
2034 	clear_siginfo(&info);
2035 	info.si_signo = SIGCHLD;
2036 	info.si_errno = 0;
2037 	/*
2038 	 * see comment in do_notify_parent() about the following 4 lines
2039 	 */
2040 	rcu_read_lock();
2041 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2042 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2043 	rcu_read_unlock();
2044 
2045 	task_cputime(tsk, &utime, &stime);
2046 	info.si_utime = nsec_to_clock_t(utime);
2047 	info.si_stime = nsec_to_clock_t(stime);
2048 
2049 	info.si_code = why;
2050 	switch (why) {
2051 	case CLD_CONTINUED:
2052 		info.si_status = SIGCONT;
2053 		break;
2054 	case CLD_STOPPED:
2055 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2056 		break;
2057 	case CLD_TRAPPED:
2058 		info.si_status = tsk->exit_code & 0x7f;
2059 		break;
2060 	default:
2061 		BUG();
2062 	}
2063 
2064 	sighand = parent->sighand;
2065 	spin_lock_irqsave(&sighand->siglock, flags);
2066 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2067 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2068 		__group_send_sig_info(SIGCHLD, &info, parent);
2069 	/*
2070 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2071 	 */
2072 	__wake_up_parent(tsk, parent);
2073 	spin_unlock_irqrestore(&sighand->siglock, flags);
2074 }
2075 
2076 static inline bool may_ptrace_stop(void)
2077 {
2078 	if (!likely(current->ptrace))
2079 		return false;
2080 	/*
2081 	 * Are we in the middle of do_coredump?
2082 	 * If so, and our tracer is also part of the coredump, stopping
2083 	 * is a deadlock situation and pointless because our tracer
2084 	 * is dead, so don't allow us to stop.
2085 	 * If SIGKILL was already sent before the caller unlocked
2086 	 * ->siglock we must see ->core_state != NULL. Otherwise it
2087 	 * is safe to enter schedule().
2088 	 *
2089 	 * This is almost outdated, a task with a pending SIGKILL can't
2090 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2091 	 * after SIGKILL was already dequeued.
2092 	 */
2093 	if (unlikely(current->mm->core_state) &&
2094 	    unlikely(current->mm == current->parent->mm))
2095 		return false;
2096 
2097 	return true;
2098 }
2099 
2100 /*
2101  * Return true if there is a SIGKILL that should be waking us up.
2102  * Called with the siglock held.
2103  */
2104 static bool sigkill_pending(struct task_struct *tsk)
2105 {
2106 	return sigismember(&tsk->pending.signal, SIGKILL) ||
2107 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2108 }
2109 
2110 /*
2111  * This must be called with current->sighand->siglock held.
2112  *
2113  * This should be the path for all ptrace stops.
2114  * We always set current->last_siginfo while stopped here.
2115  * That makes it a way to test a stopped process for
2116  * being ptrace-stopped vs being job-control-stopped.
2117  *
2118  * If we actually decide not to stop at all because the tracer
2119  * is gone, we keep current->exit_code unless clear_code.
2120  */
2121 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2122 	__releases(&current->sighand->siglock)
2123 	__acquires(&current->sighand->siglock)
2124 {
2125 	bool gstop_done = false;
2126 
2127 	if (arch_ptrace_stop_needed(exit_code, info)) {
2128 		/*
2129 		 * The arch code has something special to do before a
2130 		 * ptrace stop.  This is allowed to block, e.g. for faults
2131 		 * on user stack pages.  We can't keep the siglock while
2132 		 * calling arch_ptrace_stop, so we must release it now.
2133 		 * To preserve proper semantics, we must do this before
2134 		 * any signal bookkeeping like checking group_stop_count.
2135 		 * Meanwhile, a SIGKILL could come in before we retake the
2136 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2137 		 * So after regaining the lock, we must check for SIGKILL.
2138 		 */
2139 		spin_unlock_irq(&current->sighand->siglock);
2140 		arch_ptrace_stop(exit_code, info);
2141 		spin_lock_irq(&current->sighand->siglock);
2142 		if (sigkill_pending(current))
2143 			return;
2144 	}
2145 
2146 	set_special_state(TASK_TRACED);
2147 
2148 	/*
2149 	 * We're committing to trapping.  TRACED should be visible before
2150 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2151 	 * Also, transition to TRACED and updates to ->jobctl should be
2152 	 * atomic with respect to siglock and should be done after the arch
2153 	 * hook as siglock is released and regrabbed across it.
2154 	 *
2155 	 *     TRACER				    TRACEE
2156 	 *
2157 	 *     ptrace_attach()
2158 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2159 	 *     do_wait()
2160 	 *       set_current_state()                smp_wmb();
2161 	 *       ptrace_do_wait()
2162 	 *         wait_task_stopped()
2163 	 *           task_stopped_code()
2164 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2165 	 */
2166 	smp_wmb();
2167 
2168 	current->last_siginfo = info;
2169 	current->exit_code = exit_code;
2170 
2171 	/*
2172 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2173 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2174 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2175 	 * could be clear now.  We act as if SIGCONT is received after
2176 	 * TASK_TRACED is entered - ignore it.
2177 	 */
2178 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2179 		gstop_done = task_participate_group_stop(current);
2180 
2181 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2182 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2183 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2184 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2185 
2186 	/* entering a trap, clear TRAPPING */
2187 	task_clear_jobctl_trapping(current);
2188 
2189 	spin_unlock_irq(&current->sighand->siglock);
2190 	read_lock(&tasklist_lock);
2191 	if (may_ptrace_stop()) {
2192 		/*
2193 		 * Notify parents of the stop.
2194 		 *
2195 		 * While ptraced, there are two parents - the ptracer and
2196 		 * the real_parent of the group_leader.  The ptracer should
2197 		 * know about every stop while the real parent is only
2198 		 * interested in the completion of group stop.  The states
2199 		 * for the two don't interact with each other.  Notify
2200 		 * separately unless they're gonna be duplicates.
2201 		 */
2202 		do_notify_parent_cldstop(current, true, why);
2203 		if (gstop_done && ptrace_reparented(current))
2204 			do_notify_parent_cldstop(current, false, why);
2205 
2206 		/*
2207 		 * Don't want to allow preemption here, because
2208 		 * sys_ptrace() needs this task to be inactive.
2209 		 *
2210 		 * XXX: implement read_unlock_no_resched().
2211 		 */
2212 		preempt_disable();
2213 		read_unlock(&tasklist_lock);
2214 		cgroup_enter_frozen();
2215 		preempt_enable_no_resched();
2216 		freezable_schedule();
2217 		cgroup_leave_frozen(true);
2218 	} else {
2219 		/*
2220 		 * By the time we got the lock, our tracer went away.
2221 		 * Don't drop the lock yet, another tracer may come.
2222 		 *
2223 		 * If @gstop_done, the ptracer went away between group stop
2224 		 * completion and here.  During detach, it would have set
2225 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2226 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2227 		 * the real parent of the group stop completion is enough.
2228 		 */
2229 		if (gstop_done)
2230 			do_notify_parent_cldstop(current, false, why);
2231 
2232 		/* tasklist protects us from ptrace_freeze_traced() */
2233 		__set_current_state(TASK_RUNNING);
2234 		if (clear_code)
2235 			current->exit_code = 0;
2236 		read_unlock(&tasklist_lock);
2237 	}
2238 
2239 	/*
2240 	 * We are back.  Now reacquire the siglock before touching
2241 	 * last_siginfo, so that we are sure to have synchronized with
2242 	 * any signal-sending on another CPU that wants to examine it.
2243 	 */
2244 	spin_lock_irq(&current->sighand->siglock);
2245 	current->last_siginfo = NULL;
2246 
2247 	/* LISTENING can be set only during STOP traps, clear it */
2248 	current->jobctl &= ~JOBCTL_LISTENING;
2249 
2250 	/*
2251 	 * Queued signals ignored us while we were stopped for tracing.
2252 	 * So check for any that we should take before resuming user mode.
2253 	 * This sets TIF_SIGPENDING, but never clears it.
2254 	 */
2255 	recalc_sigpending_tsk(current);
2256 }
2257 
2258 static void ptrace_do_notify(int signr, int exit_code, int why)
2259 {
2260 	kernel_siginfo_t info;
2261 
2262 	clear_siginfo(&info);
2263 	info.si_signo = signr;
2264 	info.si_code = exit_code;
2265 	info.si_pid = task_pid_vnr(current);
2266 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2267 
2268 	/* Let the debugger run.  */
2269 	ptrace_stop(exit_code, why, 1, &info);
2270 }
2271 
2272 void ptrace_notify(int exit_code)
2273 {
2274 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2275 	if (unlikely(current->task_works))
2276 		task_work_run();
2277 
2278 	spin_lock_irq(&current->sighand->siglock);
2279 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2280 	spin_unlock_irq(&current->sighand->siglock);
2281 }
2282 
2283 /**
2284  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2285  * @signr: signr causing group stop if initiating
2286  *
2287  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2288  * and participate in it.  If already set, participate in the existing
2289  * group stop.  If participated in a group stop (and thus slept), %true is
2290  * returned with siglock released.
2291  *
2292  * If ptraced, this function doesn't handle stop itself.  Instead,
2293  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2294  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2295  * place afterwards.
2296  *
2297  * CONTEXT:
2298  * Must be called with @current->sighand->siglock held, which is released
2299  * on %true return.
2300  *
2301  * RETURNS:
2302  * %false if group stop is already cancelled or ptrace trap is scheduled.
2303  * %true if participated in group stop.
2304  */
2305 static bool do_signal_stop(int signr)
2306 	__releases(&current->sighand->siglock)
2307 {
2308 	struct signal_struct *sig = current->signal;
2309 
2310 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2311 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2312 		struct task_struct *t;
2313 
2314 		/* signr will be recorded in task->jobctl for retries */
2315 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2316 
2317 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2318 		    unlikely(signal_group_exit(sig)))
2319 			return false;
2320 		/*
2321 		 * There is no group stop already in progress.  We must
2322 		 * initiate one now.
2323 		 *
2324 		 * While ptraced, a task may be resumed while group stop is
2325 		 * still in effect and then receive a stop signal and
2326 		 * initiate another group stop.  This deviates from the
2327 		 * usual behavior as two consecutive stop signals can't
2328 		 * cause two group stops when !ptraced.  That is why we
2329 		 * also check !task_is_stopped(t) below.
2330 		 *
2331 		 * The condition can be distinguished by testing whether
2332 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2333 		 * group_exit_code in such case.
2334 		 *
2335 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2336 		 * an intervening stop signal is required to cause two
2337 		 * continued events regardless of ptrace.
2338 		 */
2339 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2340 			sig->group_exit_code = signr;
2341 
2342 		sig->group_stop_count = 0;
2343 
2344 		if (task_set_jobctl_pending(current, signr | gstop))
2345 			sig->group_stop_count++;
2346 
2347 		t = current;
2348 		while_each_thread(current, t) {
2349 			/*
2350 			 * Setting state to TASK_STOPPED for a group
2351 			 * stop is always done with the siglock held,
2352 			 * so this check has no races.
2353 			 */
2354 			if (!task_is_stopped(t) &&
2355 			    task_set_jobctl_pending(t, signr | gstop)) {
2356 				sig->group_stop_count++;
2357 				if (likely(!(t->ptrace & PT_SEIZED)))
2358 					signal_wake_up(t, 0);
2359 				else
2360 					ptrace_trap_notify(t);
2361 			}
2362 		}
2363 	}
2364 
2365 	if (likely(!current->ptrace)) {
2366 		int notify = 0;
2367 
2368 		/*
2369 		 * If there are no other threads in the group, or if there
2370 		 * is a group stop in progress and we are the last to stop,
2371 		 * report to the parent.
2372 		 */
2373 		if (task_participate_group_stop(current))
2374 			notify = CLD_STOPPED;
2375 
2376 		set_special_state(TASK_STOPPED);
2377 		spin_unlock_irq(&current->sighand->siglock);
2378 
2379 		/*
2380 		 * Notify the parent of the group stop completion.  Because
2381 		 * we're not holding either the siglock or tasklist_lock
2382 		 * here, the ptracer may attach in between; however, this is for
2383 		 * group stop and should always be delivered to the real
2384 		 * parent of the group leader.  The new ptracer will get
2385 		 * its notification when this task transitions into
2386 		 * TASK_TRACED.
2387 		 */
2388 		if (notify) {
2389 			read_lock(&tasklist_lock);
2390 			do_notify_parent_cldstop(current, false, notify);
2391 			read_unlock(&tasklist_lock);
2392 		}
2393 
2394 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2395 		cgroup_enter_frozen();
2396 		freezable_schedule();
2397 		return true;
2398 	} else {
2399 		/*
2400 		 * While ptraced, group stop is handled by STOP trap.
2401 		 * Schedule it and let the caller deal with it.
2402 		 */
2403 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2404 		return false;
2405 	}
2406 }
2407 
2408 /**
2409  * do_jobctl_trap - take care of ptrace jobctl traps
2410  *
2411  * When PT_SEIZED, it's used for both group stop and explicit
2412  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2413  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2414  * the stop signal; otherwise, %SIGTRAP.
2415  *
2416  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2417  * number as exit_code and no siginfo.
2418  *
2419  * CONTEXT:
2420  * Must be called with @current->sighand->siglock held, which may be
2421  * released and re-acquired before returning with intervening sleep.
2422  */
2423 static void do_jobctl_trap(void)
2424 {
2425 	struct signal_struct *signal = current->signal;
2426 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2427 
2428 	if (current->ptrace & PT_SEIZED) {
2429 		if (!signal->group_stop_count &&
2430 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2431 			signr = SIGTRAP;
2432 		WARN_ON_ONCE(!signr);
2433 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2434 				 CLD_STOPPED);
2435 	} else {
2436 		WARN_ON_ONCE(!signr);
2437 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2438 		current->exit_code = 0;
2439 	}
2440 }
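/*
 * Tracer-side view (sketch): for a PT_SEIZED tracee group-stopped by
 * SIGTSTP above, waitpid() reports a status whose upper bits carry the
 * event encoded into exit_code:
 *
 *	waitpid(pid, &status, 0);
 *	// WSTOPSIG(status) == SIGTSTP, status >> 16 == PTRACE_EVENT_STOP
 */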
2441 
2442 /**
2443  * do_freezer_trap - handle the freezer jobctl trap
2444  *
2445  * Puts the task into the frozen state, unless the task is about to quit,
2446  * in which case it drops JOBCTL_TRAP_FREEZE.
2447  *
2448  * CONTEXT:
2449  * Must be called with @current->sighand->siglock held,
2450  * which is always released before returning.
2451  */
2452 static void do_freezer_trap(void)
2453 	__releases(&current->sighand->siglock)
2454 {
2455 	/*
2456 	 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2457 	 * make another pass through the caller's loop to give them a chance
2458 	 * to be handled.  In any case, we'll return.
2459 	 */
2460 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2461 	     JOBCTL_TRAP_FREEZE) {
2462 		spin_unlock_irq(&current->sighand->siglock);
2463 		return;
2464 	}
2465 
2466 	/*
2467 	 * Now we're sure that there is no pending fatal signal and no
2468 	 * pending traps.  Clear TIF_SIGPENDING so that we do not drop out of
2469 	 * schedule() immediately (if there is a non-fatal signal pending), and
2470 	 * put the task to sleep.
2471 	 */
2472 	__set_current_state(TASK_INTERRUPTIBLE);
2473 	clear_thread_flag(TIF_SIGPENDING);
2474 	spin_unlock_irq(&current->sighand->siglock);
2475 	cgroup_enter_frozen();
2476 	freezable_schedule();
2477 }
2478 
2479 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2480 {
2481 	/*
2482 	 * We do not check sig_kernel_stop(signr) but set this marker
2483 	 * unconditionally because we do not know whether the debugger will
2484 	 * change signr.  This flag has no meaning unless we are going
2485 	 * to stop after return from ptrace_stop(). In this case it will
2486 	 * be checked in do_signal_stop(), we should only stop if it was
2487 	 * not cleared by SIGCONT while we were sleeping. See also the
2488 	 * comment in dequeue_signal().
2489 	 */
2490 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2491 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2492 
2493 	/* We're back.  Did the debugger cancel the sig?  */
2494 	signr = current->exit_code;
2495 	if (signr == 0)
2496 		return signr;
2497 
2498 	current->exit_code = 0;
2499 
2500 	/*
2501 	 * Update the siginfo structure if the signal has
2502 	 * changed.  If the debugger wanted something
2503 	 * specific in the siginfo structure then it should
2504 	 * have updated *info via PTRACE_SETSIGINFO.
2505 	 */
2506 	if (signr != info->si_signo) {
2507 		clear_siginfo(info);
2508 		info->si_signo = signr;
2509 		info->si_errno = 0;
2510 		info->si_code = SI_USER;
2511 		rcu_read_lock();
2512 		info->si_pid = task_pid_vnr(current->parent);
2513 		info->si_uid = from_kuid_munged(current_user_ns(),
2514 						task_uid(current->parent));
2515 		rcu_read_unlock();
2516 	}
2517 
2518 	/* If the (new) signal is now blocked, requeue it.  */
2519 	if (sigismember(&current->blocked, signr)) {
2520 		send_signal(signr, info, current, PIDTYPE_PID);
2521 		signr = 0;
2522 	}
2523 
2524 	return signr;
2525 }
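/*
 * Tracer-side view (sketch): after a signal-delivery stop, the value the
 * debugger passes to the restarting ptrace request becomes the tracee's
 * exit_code examined above:
 *
 *	ptrace(PTRACE_SETSIGINFO, pid, 0, &si);	// optional: edit siginfo
 *	ptrace(PTRACE_CONT, pid, 0, 0);		// cancel the signal, or
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);	// deliver a (changed) signal
 */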
2526 
2527 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2528 {
2529 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2530 	case SIL_FAULT:
2531 	case SIL_FAULT_MCEERR:
2532 	case SIL_FAULT_BNDERR:
2533 	case SIL_FAULT_PKUERR:
2534 		ksig->info.si_addr = arch_untagged_si_addr(
2535 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2536 		break;
2537 	case SIL_KILL:
2538 	case SIL_TIMER:
2539 	case SIL_POLL:
2540 	case SIL_CHLD:
2541 	case SIL_RT:
2542 	case SIL_SYS:
2543 		break;
2544 	}
2545 }
2546 
2547 bool get_signal(struct ksignal *ksig)
2548 {
2549 	struct sighand_struct *sighand = current->sighand;
2550 	struct signal_struct *signal = current->signal;
2551 	int signr;
2552 
2553 	if (unlikely(current->task_works))
2554 		task_work_run();
2555 
2556 	/*
2557 	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2558 	 * that the arch handlers don't all have to do it. If we get here
2559 	 * without TIF_SIGPENDING, just exit after running signal work.
2560 	 */
2561 	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2562 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2563 			tracehook_notify_signal();
2564 		if (!task_sigpending(current))
2565 			return false;
2566 	}
2567 
2568 	if (unlikely(uprobe_deny_signal()))
2569 		return false;
2570 
2571 	/*
2572 	 * Do this once, we can't return to user-mode if freezing() == T.
2573 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2574 	 * thus do not need another check after return.
2575 	 */
2576 	try_to_freeze();
2577 
2578 relock:
2579 	spin_lock_irq(&sighand->siglock);
2580 
2581 	/*
2582 	 * Every stopped thread goes here after wakeup. Check to see if
2583 	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2584 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2585 	 */
2586 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2587 		int why;
2588 
2589 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2590 			why = CLD_CONTINUED;
2591 		else
2592 			why = CLD_STOPPED;
2593 
2594 		signal->flags &= ~SIGNAL_CLD_MASK;
2595 
2596 		spin_unlock_irq(&sighand->siglock);
2597 
2598 		/*
2599 		 * Notify the parent that we're continuing.  This event is
2600 		 * always per-process and doesn't make a whole lot of sense
2601 		 * for ptracers, who shouldn't consume the state via
2602 		 * wait(2) either, but, for backward compatibility, notify
2603 		 * the ptracer of the group leader too unless it's gonna be
2604 		 * a duplicate.
2605 		 */
2606 		read_lock(&tasklist_lock);
2607 		do_notify_parent_cldstop(current, false, why);
2608 
2609 		if (ptrace_reparented(current->group_leader))
2610 			do_notify_parent_cldstop(current->group_leader,
2611 						true, why);
2612 		read_unlock(&tasklist_lock);
2613 
2614 		goto relock;
2615 	}
2616 
2617 	/* Has this task already been marked for death? */
2618 	if (signal_group_exit(signal)) {
2619 		ksig->info.si_signo = signr = SIGKILL;
2620 		sigdelset(&current->pending.signal, SIGKILL);
2621 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2622 				&sighand->action[SIGKILL - 1]);
2623 		recalc_sigpending();
2624 		goto fatal;
2625 	}
2626 
2627 	for (;;) {
2628 		struct k_sigaction *ka;
2629 
2630 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2631 		    do_signal_stop(0))
2632 			goto relock;
2633 
2634 		if (unlikely(current->jobctl &
2635 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2636 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2637 				do_jobctl_trap();
2638 				spin_unlock_irq(&sighand->siglock);
2639 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2640 				do_freezer_trap();
2641 
2642 			goto relock;
2643 		}
2644 
2645 		/*
2646 		 * If the task is leaving the frozen state, let's update
2647 		 * cgroup counters and reset the frozen bit.
2648 		 */
2649 		if (unlikely(cgroup_task_frozen(current))) {
2650 			spin_unlock_irq(&sighand->siglock);
2651 			cgroup_leave_frozen(false);
2652 			goto relock;
2653 		}
2654 
2655 		/*
2656 		 * Signals generated by the execution of an instruction
2657 		 * need to be delivered before any other pending signals
2658 		 * so that the instruction pointer in the signal stack
2659 		 * frame points to the faulting instruction.
2660 		 */
2661 		signr = dequeue_synchronous_signal(&ksig->info);
2662 		if (!signr)
2663 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2664 
2665 		if (!signr)
2666 			break; /* will return 0 */
2667 
2668 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2669 			signr = ptrace_signal(signr, &ksig->info);
2670 			if (!signr)
2671 				continue;
2672 		}
2673 
2674 		ka = &sighand->action[signr-1];
2675 
2676 		/* Trace actually delivered signals. */
2677 		trace_signal_deliver(signr, &ksig->info, ka);
2678 
2679 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2680 			continue;
2681 		if (ka->sa.sa_handler != SIG_DFL) {
2682 			/* Run the handler.  */
2683 			ksig->ka = *ka;
2684 
2685 			if (ka->sa.sa_flags & SA_ONESHOT)
2686 				ka->sa.sa_handler = SIG_DFL;
2687 
2688 			break; /* will return non-zero "signr" value */
2689 		}
2690 
2691 		/*
2692 		 * Now we are doing the default action for this signal.
2693 		 */
2694 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2695 			continue;
2696 
2697 		/*
2698 		 * Global init gets no signals it doesn't want.
2699 		 * Container-init gets no signals it doesn't want from same
2700 		 * container.
2701 		 *
2702 		 * Note that if global/container-init sees a sig_kernel_only()
2703 		 * signal here, the signal must have been generated internally
2704 		 * or must have come from an ancestor namespace. In either
2705 		 * case, the signal cannot be dropped.
2706 		 */
2707 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2708 				!sig_kernel_only(signr))
2709 			continue;
2710 
2711 		if (sig_kernel_stop(signr)) {
2712 			/*
2713 			 * The default action is to stop all threads in
2714 			 * the thread group.  The job control signals
2715 			 * do nothing in an orphaned pgrp, but SIGSTOP
2716 			 * always works.  Note that siglock needs to be
2717 			 * dropped during the call to is_orphaned_pgrp()
2718 			 * because of lock ordering with tasklist_lock.
2719 			 * This allows an intervening SIGCONT to be posted.
2720 			 * We need to check for that and bail out if necessary.
2721 			 */
2722 			if (signr != SIGSTOP) {
2723 				spin_unlock_irq(&sighand->siglock);
2724 
2725 				/* signals can be posted during this window */
2726 
2727 				if (is_current_pgrp_orphaned())
2728 					goto relock;
2729 
2730 				spin_lock_irq(&sighand->siglock);
2731 			}
2732 
2733 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2734 				/* It released the siglock.  */
2735 				goto relock;
2736 			}
2737 
2738 			/*
2739 			 * We didn't actually stop, due to a race
2740 			 * with SIGCONT or something like that.
2741 			 */
2742 			continue;
2743 		}
2744 
2745 	fatal:
2746 		spin_unlock_irq(&sighand->siglock);
2747 		if (unlikely(cgroup_task_frozen(current)))
2748 			cgroup_leave_frozen(true);
2749 
2750 		/*
2751 		 * Anything else is fatal, maybe with a core dump.
2752 		 */
2753 		current->flags |= PF_SIGNALED;
2754 
2755 		if (sig_kernel_coredump(signr)) {
2756 			if (print_fatal_signals)
2757 				print_fatal_signal(ksig->info.si_signo);
2758 			proc_coredump_connector(current);
2759 			/*
2760 			 * If it was able to dump core, this kills all
2761 			 * other threads in the group and synchronizes with
2762 			 * their demise.  If we lost the race with another
2763 			 * thread getting here, it set group_exit_code
2764 			 * first and our do_group_exit call below will use
2765 			 * that value and ignore the one we pass it.
2766 			 */
2767 			do_coredump(&ksig->info);
2768 		}
2769 
2770 		/*
2771 		 * PF_IO_WORKER threads will catch and exit on fatal signals
2772 		 * themselves. They have cleanup that must be performed, so
2773 		 * we cannot call do_exit() on their behalf.
2774 		 */
2775 		if (current->flags & PF_IO_WORKER)
2776 			goto out;
2777 
2778 		/*
2779 		 * Death signals, no core dump.
2780 		 */
2781 		do_group_exit(ksig->info.si_signo);
2782 		/* NOTREACHED */
2783 	}
2784 	spin_unlock_irq(&sighand->siglock);
2785 out:
2786 	ksig->sig = signr;
2787 
2788 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2789 		hide_si_addr_tag_bits(ksig);
2790 
2791 	return ksig->sig > 0;
2792 }
2793 
2794 /**
2795  * signal_delivered - update task state after a signal has been delivered
2796  * @ksig:		kernel signal struct
2797  * @stepping:		nonzero if debugger single-step or block-step in use
2798  *
2799  * This function should be called when a signal has successfully been
2800  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2801  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2802  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2803  */
2804 static void signal_delivered(struct ksignal *ksig, int stepping)
2805 {
2806 	sigset_t blocked;
2807 
2808 	/* A signal was successfully delivered, and the saved sigmask
2809 	 * was stored on the signal frame, and will be restored by
2810 	 * sigreturn.  So we can simply clear the restore sigmask
2811 	 * flag.  */
2812 	clear_restore_sigmask();
2813 
2814 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2815 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2816 		sigaddset(&blocked, ksig->sig);
2817 	set_current_blocked(&blocked);
2818 	tracehook_signal_handler(stepping);
2819 }
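/*
 * Worked example of the resulting mask (standard sigaction(2) semantics;
 * handler() is illustrative):
 *
 *	struct sigaction sa = { .sa_handler = handler };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);
 *	sigaction(SIGUSR1, &sa, NULL);
 *	// without SA_NODEFER, the handler runs with both SIGUSR1 and
 *	// SIGUSR2 blocked; sigreturn restores the previous mask
 */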
2820 
2821 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2822 {
2823 	if (failed)
2824 		force_sigsegv(ksig->sig);
2825 	else
2826 		signal_delivered(ksig, stepping);
2827 }
2828 
2829 /*
2830  * It could be that complete_signal() picked us to notify about the
2831  * group-wide signal. Other threads should be notified now to take
2832  * the shared signals in @which since we will not.
2833  */
2834 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2835 {
2836 	sigset_t retarget;
2837 	struct task_struct *t;
2838 
2839 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2840 	if (sigisemptyset(&retarget))
2841 		return;
2842 
2843 	t = tsk;
2844 	while_each_thread(tsk, t) {
2845 		if (t->flags & PF_EXITING)
2846 			continue;
2847 
2848 		if (!has_pending_signals(&retarget, &t->blocked))
2849 			continue;
2850 		/* Remove the signals this thread can handle. */
2851 		sigandsets(&retarget, &retarget, &t->blocked);
2852 
2853 		if (!task_sigpending(t))
2854 			signal_wake_up(t, 0);
2855 
2856 		if (sigisemptyset(&retarget))
2857 			break;
2858 	}
2859 }
2860 
2861 void exit_signals(struct task_struct *tsk)
2862 {
2863 	int group_stop = 0;
2864 	sigset_t unblocked;
2865 
2866 	/*
2867 	 * @tsk is about to have PF_EXITING set - lock out users which
2868 	 * expect stable threadgroup.
2869 	 */
2870 	cgroup_threadgroup_change_begin(tsk);
2871 
2872 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2873 		tsk->flags |= PF_EXITING;
2874 		cgroup_threadgroup_change_end(tsk);
2875 		return;
2876 	}
2877 
2878 	spin_lock_irq(&tsk->sighand->siglock);
2879 	/*
2880 	 * From now this task is not visible for group-wide signals,
2881 	 * see wants_signal(), do_signal_stop().
2882 	 */
2883 	tsk->flags |= PF_EXITING;
2884 
2885 	cgroup_threadgroup_change_end(tsk);
2886 
2887 	if (!task_sigpending(tsk))
2888 		goto out;
2889 
2890 	unblocked = tsk->blocked;
2891 	signotset(&unblocked);
2892 	retarget_shared_pending(tsk, &unblocked);
2893 
2894 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2895 	    task_participate_group_stop(tsk))
2896 		group_stop = CLD_STOPPED;
2897 out:
2898 	spin_unlock_irq(&tsk->sighand->siglock);
2899 
2900 	/*
2901 	 * If group stop has completed, deliver the notification.  This
2902 	 * should always go to the real parent of the group leader.
2903 	 */
2904 	if (unlikely(group_stop)) {
2905 		read_lock(&tasklist_lock);
2906 		do_notify_parent_cldstop(tsk, false, group_stop);
2907 		read_unlock(&tasklist_lock);
2908 	}
2909 }
2910 
2911 /*
2912  * System call entry points.
2913  */
2914 
2915 /**
2916  *  sys_restart_syscall - restart a system call
2917  */
2918 SYSCALL_DEFINE0(restart_syscall)
2919 {
2920 	struct restart_block *restart = &current->restart_block;
2921 	return restart->fn(restart);
2922 }
2923 
2924 long do_no_restart_syscall(struct restart_block *param)
2925 {
2926 	return -EINTR;
2927 }
2928 
2929 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2930 {
2931 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2932 		sigset_t newblocked;
2933 		/* A set of now blocked but previously unblocked signals. */
2934 		sigandnsets(&newblocked, newset, &current->blocked);
2935 		retarget_shared_pending(tsk, &newblocked);
2936 	}
2937 	tsk->blocked = *newset;
2938 	recalc_sigpending();
2939 }
2940 
2941 /**
2942  * set_current_blocked - change current->blocked mask
2943  * @newset: new mask
2944  *
2945  * It is wrong to change ->blocked directly, this helper should be used
2946  * to ensure the process can't miss a shared signal we are going to block.
2947  */
2948 void set_current_blocked(sigset_t *newset)
2949 {
2950 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2951 	__set_current_blocked(newset);
2952 }
2953 
2954 void __set_current_blocked(const sigset_t *newset)
2955 {
2956 	struct task_struct *tsk = current;
2957 
2958 	/*
2959 	 * In case the signal mask hasn't changed, there is nothing we need
2960 	 * to do.  The current->blocked shouldn't be modified by any other task.
2961 	 */
2962 	if (sigequalsets(&tsk->blocked, newset))
2963 		return;
2964 
2965 	spin_lock_irq(&tsk->sighand->siglock);
2966 	__set_task_blocked(tsk, newset);
2967 	spin_unlock_irq(&tsk->sighand->siglock);
2968 }
2969 
2970 /*
2971  * This is also useful for kernel threads that want to temporarily
2972  * (or permanently) block certain signals.
2973  *
2974  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2975  * interface happily blocks "unblockable" signals like SIGKILL
2976  * and friends.
2977  */
2978 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2979 {
2980 	struct task_struct *tsk = current;
2981 	sigset_t newset;
2982 
2983 	/* Lockless, only current can change ->blocked, never from irq */
2984 	if (oldset)
2985 		*oldset = tsk->blocked;
2986 
2987 	switch (how) {
2988 	case SIG_BLOCK:
2989 		sigorsets(&newset, &tsk->blocked, set);
2990 		break;
2991 	case SIG_UNBLOCK:
2992 		sigandnsets(&newset, &tsk->blocked, set);
2993 		break;
2994 	case SIG_SETMASK:
2995 		newset = *set;
2996 		break;
2997 	default:
2998 		return -EINVAL;
2999 	}
3000 
3001 	__set_current_blocked(&newset);
3002 	return 0;
3003 }
3004 EXPORT_SYMBOL(sigprocmask);
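/*
 * Sketch of in-kernel usage (hypothetical kthread code; note the comment
 * above: this interface will happily block even SIGKILL):
 *
 *	sigset_t set;
 *
 *	siginitset(&set, sigmask(SIGHUP) | sigmask(SIGINT));
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 */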
3005 
3006 /*
3007  * This API helps set app-provided sigmasks.
3008  *
3009  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3010  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3011  *
3012  * Note that it does set_restore_sigmask() in advance, so it must always be
3013  * paired with restore_saved_sigmask_unless() before returning from the syscall.
3014  */
3015 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3016 {
3017 	sigset_t kmask;
3018 
3019 	if (!umask)
3020 		return 0;
3021 	if (sigsetsize != sizeof(sigset_t))
3022 		return -EINVAL;
3023 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3024 		return -EFAULT;
3025 
3026 	set_restore_sigmask();
3027 	current->saved_sigmask = current->blocked;
3028 	set_current_blocked(&kmask);
3029 
3030 	return 0;
3031 }
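/*
 * Typical pairing (sketch of the pattern used by ppoll()-style syscalls;
 * do_poll_work() stands in for the real syscall body):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_poll_work(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */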
3032 
3033 #ifdef CONFIG_COMPAT
3034 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3035 			    size_t sigsetsize)
3036 {
3037 	sigset_t kmask;
3038 
3039 	if (!umask)
3040 		return 0;
3041 	if (sigsetsize != sizeof(compat_sigset_t))
3042 		return -EINVAL;
3043 	if (get_compat_sigset(&kmask, umask))
3044 		return -EFAULT;
3045 
3046 	set_restore_sigmask();
3047 	current->saved_sigmask = current->blocked;
3048 	set_current_blocked(&kmask);
3049 
3050 	return 0;
3051 }
3052 #endif
3053 
3054 /**
3055  *  sys_rt_sigprocmask - change the list of currently blocked signals
3056  *  @how: whether to add, remove, or set signals
3057  *  @nset: new value of the signal mask if non-null
3058  *  @oset: previous value of signal mask if non-null
3059  *  @sigsetsize: size of sigset_t type
3060  */
3061 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3062 		sigset_t __user *, oset, size_t, sigsetsize)
3063 {
3064 	sigset_t old_set, new_set;
3065 	int error;
3066 
3067 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3068 	if (sigsetsize != sizeof(sigset_t))
3069 		return -EINVAL;
3070 
3071 	old_set = current->blocked;
3072 
3073 	if (nset) {
3074 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3075 			return -EFAULT;
3076 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3077 
3078 		error = sigprocmask(how, &new_set, NULL);
3079 		if (error)
3080 			return error;
3081 	}
3082 
3083 	if (oset) {
3084 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3085 			return -EFAULT;
3086 	}
3087 
3088 	return 0;
3089 }
3090 
3091 #ifdef CONFIG_COMPAT
3092 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3093 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3094 {
3095 	sigset_t old_set = current->blocked;
3096 
3097 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3098 	if (sigsetsize != sizeof(sigset_t))
3099 		return -EINVAL;
3100 
3101 	if (nset) {
3102 		sigset_t new_set;
3103 		int error;
3104 		if (get_compat_sigset(&new_set, nset))
3105 			return -EFAULT;
3106 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3107 
3108 		error = sigprocmask(how, &new_set, NULL);
3109 		if (error)
3110 			return error;
3111 	}
3112 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3113 }
3114 #endif
3115 
3116 static void do_sigpending(sigset_t *set)
3117 {
3118 	spin_lock_irq(&current->sighand->siglock);
3119 	sigorsets(set, &current->pending.signal,
3120 		  &current->signal->shared_pending.signal);
3121 	spin_unlock_irq(&current->sighand->siglock);
3122 
3123 	/* Outside the lock because only this thread touches it.  */
3124 	sigandsets(set, &current->blocked, set);
3125 }
3126 
3127 /**
3128  *  sys_rt_sigpending - examine pending signals that have been raised
3129  *			while blocked
3130  *  @uset: stores pending signals
3131  *  @sigsetsize: size of sigset_t type or larger
3132  */
3133 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3134 {
3135 	sigset_t set;
3136 
3137 	if (sigsetsize > sizeof(*uset))
3138 		return -EINVAL;
3139 
3140 	do_sigpending(&set);
3141 
3142 	if (copy_to_user(uset, &set, sigsetsize))
3143 		return -EFAULT;
3144 
3145 	return 0;
3146 }
3147 
3148 #ifdef CONFIG_COMPAT
3149 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3150 		compat_size_t, sigsetsize)
3151 {
3152 	sigset_t set;
3153 
3154 	if (sigsetsize > sizeof(*uset))
3155 		return -EINVAL;
3156 
3157 	do_sigpending(&set);
3158 
3159 	return put_compat_sigset(uset, &set, sigsetsize);
3160 }
3161 #endif
3162 
3163 static const struct {
3164 	unsigned char limit, layout;
3165 } sig_sicodes[] = {
3166 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3167 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3168 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3169 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3170 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3171 #if defined(SIGEMT)
3172 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3173 #endif
3174 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3175 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3176 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3177 };
3178 
3179 static bool known_siginfo_layout(unsigned sig, int si_code)
3180 {
3181 	if (si_code == SI_KERNEL)
3182 		return true;
3183 	else if (si_code > SI_USER) {
3184 		if (sig_specific_sicodes(sig)) {
3185 			if (si_code <= sig_sicodes[sig].limit)
3186 				return true;
3187 		}
3188 		else if (si_code <= NSIGPOLL)
3189 			return true;
3190 	}
3191 	else if (si_code >= SI_DETHREAD)
3192 		return true;
3193 	else if (si_code == SI_ASYNCNL)
3194 		return true;
3195 	return false;
3196 }
3197 
3198 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3199 {
3200 	enum siginfo_layout layout = SIL_KILL;
3201 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3202 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3203 		    (si_code <= sig_sicodes[sig].limit)) {
3204 			layout = sig_sicodes[sig].layout;
3205 			/* Handle the exceptions */
3206 			if ((sig == SIGBUS) &&
3207 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3208 				layout = SIL_FAULT_MCEERR;
3209 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3210 				layout = SIL_FAULT_BNDERR;
3211 #ifdef SEGV_PKUERR
3212 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3213 				layout = SIL_FAULT_PKUERR;
3214 #endif
3215 		}
3216 		else if (si_code <= NSIGPOLL)
3217 			layout = SIL_POLL;
3218 	} else {
3219 		if (si_code == SI_TIMER)
3220 			layout = SIL_TIMER;
3221 		else if (si_code == SI_SIGIO)
3222 			layout = SIL_POLL;
3223 		else if (si_code < 0)
3224 			layout = SIL_RT;
3225 	}
3226 	return layout;
3227 }
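/*
 * Classification examples, following the rules above:
 *	SIGSEGV, SEGV_MAPERR	-> SIL_FAULT
 *	SIGSEGV, SEGV_BNDERR	-> SIL_FAULT_BNDERR
 *	SIGSEGV, SI_USER	-> SIL_KILL	(plain kill(2))
 *	SIGUSR1, SI_QUEUE	-> SIL_RT	(si_code < 0)
 *	SIGCHLD, CLD_EXITED	-> SIL_CHLD
 */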
3228 
3229 static inline char __user *si_expansion(const siginfo_t __user *info)
3230 {
3231 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3232 }
3233 
3234 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3235 {
3236 	char __user *expansion = si_expansion(to);
3237 	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3238 		return -EFAULT;
3239 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3240 		return -EFAULT;
3241 	return 0;
3242 }
3243 
3244 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3245 				       const siginfo_t __user *from)
3246 {
3247 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3248 		char __user *expansion = si_expansion(from);
3249 		char buf[SI_EXPANSION_SIZE];
3250 		int i;
3251 		/*
3252 		 * An unknown si_code might need more than
3253 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3254 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3255 		 * will return this data to userspace exactly.
3256 		 */
3257 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3258 			return -EFAULT;
3259 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3260 			if (buf[i] != 0)
3261 				return -E2BIG;
3262 		}
3263 	}
3264 	return 0;
3265 }
3266 
3267 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3268 				    const siginfo_t __user *from)
3269 {
3270 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3271 		return -EFAULT;
3272 	to->si_signo = signo;
3273 	return post_copy_siginfo_from_user(to, from);
3274 }
3275 
3276 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3277 {
3278 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3279 		return -EFAULT;
3280 	return post_copy_siginfo_from_user(to, from);
3281 }
3282 
3283 #ifdef CONFIG_COMPAT
3284 /**
3285  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3286  * @to: compat siginfo destination
3287  * @from: kernel siginfo source
3288  *
3289  * Note: This function does not work properly for SIGCHLD on x32, but
3290  * fortunately it doesn't have to.  The only valid callers of this function are
3291  * copy_siginfo_to_user32 (which is overridden for x32) and the coredump code.
3292  * The latter does not care because SIGCHLD will never cause a coredump.
3293  */
3294 void copy_siginfo_to_external32(struct compat_siginfo *to,
3295 		const struct kernel_siginfo *from)
3296 {
3297 	memset(to, 0, sizeof(*to));
3298 
3299 	to->si_signo = from->si_signo;
3300 	to->si_errno = from->si_errno;
3301 	to->si_code  = from->si_code;
3302 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3303 	case SIL_KILL:
3304 		to->si_pid = from->si_pid;
3305 		to->si_uid = from->si_uid;
3306 		break;
3307 	case SIL_TIMER:
3308 		to->si_tid     = from->si_tid;
3309 		to->si_overrun = from->si_overrun;
3310 		to->si_int     = from->si_int;
3311 		break;
3312 	case SIL_POLL:
3313 		to->si_band = from->si_band;
3314 		to->si_fd   = from->si_fd;
3315 		break;
3316 	case SIL_FAULT:
3317 		to->si_addr = ptr_to_compat(from->si_addr);
3318 #ifdef __ARCH_SI_TRAPNO
3319 		to->si_trapno = from->si_trapno;
3320 #endif
3321 		break;
3322 	case SIL_FAULT_MCEERR:
3323 		to->si_addr = ptr_to_compat(from->si_addr);
3324 #ifdef __ARCH_SI_TRAPNO
3325 		to->si_trapno = from->si_trapno;
3326 #endif
3327 		to->si_addr_lsb = from->si_addr_lsb;
3328 		break;
3329 	case SIL_FAULT_BNDERR:
3330 		to->si_addr = ptr_to_compat(from->si_addr);
3331 #ifdef __ARCH_SI_TRAPNO
3332 		to->si_trapno = from->si_trapno;
3333 #endif
3334 		to->si_lower = ptr_to_compat(from->si_lower);
3335 		to->si_upper = ptr_to_compat(from->si_upper);
3336 		break;
3337 	case SIL_FAULT_PKUERR:
3338 		to->si_addr = ptr_to_compat(from->si_addr);
3339 #ifdef __ARCH_SI_TRAPNO
3340 		to->si_trapno = from->si_trapno;
3341 #endif
3342 		to->si_pkey = from->si_pkey;
3343 		break;
3344 	case SIL_CHLD:
3345 		to->si_pid = from->si_pid;
3346 		to->si_uid = from->si_uid;
3347 		to->si_status = from->si_status;
3348 		to->si_utime = from->si_utime;
3349 		to->si_stime = from->si_stime;
3350 		break;
3351 	case SIL_RT:
3352 		to->si_pid = from->si_pid;
3353 		to->si_uid = from->si_uid;
3354 		to->si_int = from->si_int;
3355 		break;
3356 	case SIL_SYS:
3357 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3358 		to->si_syscall   = from->si_syscall;
3359 		to->si_arch      = from->si_arch;
3360 		break;
3361 	}
3362 }
3363 
3364 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3365 			   const struct kernel_siginfo *from)
3366 {
3367 	struct compat_siginfo new;
3368 
3369 	copy_siginfo_to_external32(&new, from);
3370 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3371 		return -EFAULT;
3372 	return 0;
3373 }
3374 
3375 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3376 					 const struct compat_siginfo *from)
3377 {
3378 	clear_siginfo(to);
3379 	to->si_signo = from->si_signo;
3380 	to->si_errno = from->si_errno;
3381 	to->si_code  = from->si_code;
3382 	switch (siginfo_layout(from->si_signo, from->si_code)) {
3383 	case SIL_KILL:
3384 		to->si_pid = from->si_pid;
3385 		to->si_uid = from->si_uid;
3386 		break;
3387 	case SIL_TIMER:
3388 		to->si_tid     = from->si_tid;
3389 		to->si_overrun = from->si_overrun;
3390 		to->si_int     = from->si_int;
3391 		break;
3392 	case SIL_POLL:
3393 		to->si_band = from->si_band;
3394 		to->si_fd   = from->si_fd;
3395 		break;
3396 	case SIL_FAULT:
3397 		to->si_addr = compat_ptr(from->si_addr);
3398 #ifdef __ARCH_SI_TRAPNO
3399 		to->si_trapno = from->si_trapno;
3400 #endif
3401 		break;
3402 	case SIL_FAULT_MCEERR:
3403 		to->si_addr = compat_ptr(from->si_addr);
3404 #ifdef __ARCH_SI_TRAPNO
3405 		to->si_trapno = from->si_trapno;
3406 #endif
3407 		to->si_addr_lsb = from->si_addr_lsb;
3408 		break;
3409 	case SIL_FAULT_BNDERR:
3410 		to->si_addr = compat_ptr(from->si_addr);
3411 #ifdef __ARCH_SI_TRAPNO
3412 		to->si_trapno = from->si_trapno;
3413 #endif
3414 		to->si_lower = compat_ptr(from->si_lower);
3415 		to->si_upper = compat_ptr(from->si_upper);
3416 		break;
3417 	case SIL_FAULT_PKUERR:
3418 		to->si_addr = compat_ptr(from->si_addr);
3419 #ifdef __ARCH_SI_TRAPNO
3420 		to->si_trapno = from->si_trapno;
3421 #endif
3422 		to->si_pkey = from->si_pkey;
3423 		break;
3424 	case SIL_CHLD:
3425 		to->si_pid    = from->si_pid;
3426 		to->si_uid    = from->si_uid;
3427 		to->si_status = from->si_status;
3428 #ifdef CONFIG_X86_X32_ABI
3429 		if (in_x32_syscall()) {
3430 			to->si_utime = from->_sifields._sigchld_x32._utime;
3431 			to->si_stime = from->_sifields._sigchld_x32._stime;
3432 		} else
3433 #endif
3434 		{
3435 			to->si_utime = from->si_utime;
3436 			to->si_stime = from->si_stime;
3437 		}
3438 		break;
3439 	case SIL_RT:
3440 		to->si_pid = from->si_pid;
3441 		to->si_uid = from->si_uid;
3442 		to->si_int = from->si_int;
3443 		break;
3444 	case SIL_SYS:
3445 		to->si_call_addr = compat_ptr(from->si_call_addr);
3446 		to->si_syscall   = from->si_syscall;
3447 		to->si_arch      = from->si_arch;
3448 		break;
3449 	}
3450 	return 0;
3451 }
3452 
3453 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3454 				      const struct compat_siginfo __user *ufrom)
3455 {
3456 	struct compat_siginfo from;
3457 
3458 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3459 		return -EFAULT;
3460 
3461 	from.si_signo = signo;
3462 	return post_copy_siginfo_from_user32(to, &from);
3463 }
3464 
3465 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3466 			     const struct compat_siginfo __user *ufrom)
3467 {
3468 	struct compat_siginfo from;
3469 
3470 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3471 		return -EFAULT;
3472 
3473 	return post_copy_siginfo_from_user32(to, &from);
3474 }
3475 #endif /* CONFIG_COMPAT */
3476 
3477 /**
3478  *  do_sigtimedwait - wait for queued signals specified in @which
3479  *  @which: queued signals to wait for
3480  *  @info: if non-null, the signal's siginfo is returned here
3481  *  @ts: upper bound on process time suspension
3482  */
3483 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3484 		    const struct timespec64 *ts)
3485 {
3486 	ktime_t *to = NULL, timeout = KTIME_MAX;
3487 	struct task_struct *tsk = current;
3488 	sigset_t mask = *which;
3489 	int sig, ret = 0;
3490 
3491 	if (ts) {
3492 		if (!timespec64_valid(ts))
3493 			return -EINVAL;
3494 		timeout = timespec64_to_ktime(*ts);
3495 		to = &timeout;
3496 	}
3497 
3498 	/*
3499 	 * Invert the set of allowed signals to get those we want to block.
3500 	 */
3501 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3502 	signotset(&mask);
3503 
3504 	spin_lock_irq(&tsk->sighand->siglock);
3505 	sig = dequeue_signal(tsk, &mask, info);
3506 	if (!sig && timeout) {
3507 		/*
3508 		 * None ready, temporarily unblock those we're interested in
3509 		 * while we are sleeping so that we'll be awakened when they
3510 		 * arrive.  Unblocking is always fine, we can avoid
3511 		 * set_current_blocked().
3512 		 */
3513 		tsk->real_blocked = tsk->blocked;
3514 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3515 		recalc_sigpending();
3516 		spin_unlock_irq(&tsk->sighand->siglock);
3517 
3518 		__set_current_state(TASK_INTERRUPTIBLE);
3519 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3520 							 HRTIMER_MODE_REL);
3521 		spin_lock_irq(&tsk->sighand->siglock);
3522 		__set_task_blocked(tsk, &tsk->real_blocked);
3523 		sigemptyset(&tsk->real_blocked);
3524 		sig = dequeue_signal(tsk, &mask, info);
3525 	}
3526 	spin_unlock_irq(&tsk->sighand->siglock);
3527 
3528 	if (sig)
3529 		return sig;
3530 	return ret ? -EINTR : -EAGAIN;
3531 }
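/*
 * Userspace view (sketch; glibc's sigtimedwait(3) reaches this code via
 * rt_sigtimedwait):
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// avoid racing the default action
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		;	// timed out
 */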
3532 
3533 /**
3534  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3535  *			in @uthese
3536  *  @uthese: queued signals to wait for
3537  *  @uinfo: if non-null, the signal's siginfo is returned here
3538  *  @uts: upper bound on process time suspension
3539  *  @sigsetsize: size of sigset_t type
3540  */
3541 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3542 		siginfo_t __user *, uinfo,
3543 		const struct __kernel_timespec __user *, uts,
3544 		size_t, sigsetsize)
3545 {
3546 	sigset_t these;
3547 	struct timespec64 ts;
3548 	kernel_siginfo_t info;
3549 	int ret;
3550 
3551 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3552 	if (sigsetsize != sizeof(sigset_t))
3553 		return -EINVAL;
3554 
3555 	if (copy_from_user(&these, uthese, sizeof(these)))
3556 		return -EFAULT;
3557 
3558 	if (uts) {
3559 		if (get_timespec64(&ts, uts))
3560 			return -EFAULT;
3561 	}
3562 
3563 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3564 
3565 	if (ret > 0 && uinfo) {
3566 		if (copy_siginfo_to_user(uinfo, &info))
3567 			ret = -EFAULT;
3568 	}
3569 
3570 	return ret;
3571 }
3572 
3573 #ifdef CONFIG_COMPAT_32BIT_TIME
3574 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3575 		siginfo_t __user *, uinfo,
3576 		const struct old_timespec32 __user *, uts,
3577 		size_t, sigsetsize)
3578 {
3579 	sigset_t these;
3580 	struct timespec64 ts;
3581 	kernel_siginfo_t info;
3582 	int ret;
3583 
3584 	if (sigsetsize != sizeof(sigset_t))
3585 		return -EINVAL;
3586 
3587 	if (copy_from_user(&these, uthese, sizeof(these)))
3588 		return -EFAULT;
3589 
3590 	if (uts) {
3591 		if (get_old_timespec32(&ts, uts))
3592 			return -EFAULT;
3593 	}
3594 
3595 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3596 
3597 	if (ret > 0 && uinfo) {
3598 		if (copy_siginfo_to_user(uinfo, &info))
3599 			ret = -EFAULT;
3600 	}
3601 
3602 	return ret;
3603 }
3604 #endif
3605 
3606 #ifdef CONFIG_COMPAT
3607 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3608 		struct compat_siginfo __user *, uinfo,
3609 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3610 {
3611 	sigset_t s;
3612 	struct timespec64 t;
3613 	kernel_siginfo_t info;
3614 	long ret;
3615 
3616 	if (sigsetsize != sizeof(sigset_t))
3617 		return -EINVAL;
3618 
3619 	if (get_compat_sigset(&s, uthese))
3620 		return -EFAULT;
3621 
3622 	if (uts) {
3623 		if (get_timespec64(&t, uts))
3624 			return -EFAULT;
3625 	}
3626 
3627 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3628 
3629 	if (ret > 0 && uinfo) {
3630 		if (copy_siginfo_to_user32(uinfo, &info))
3631 			ret = -EFAULT;
3632 	}
3633 
3634 	return ret;
3635 }
3636 
3637 #ifdef CONFIG_COMPAT_32BIT_TIME
3638 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3639 		struct compat_siginfo __user *, uinfo,
3640 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3641 {
3642 	sigset_t s;
3643 	struct timespec64 t;
3644 	kernel_siginfo_t info;
3645 	long ret;
3646 
3647 	if (sigsetsize != sizeof(sigset_t))
3648 		return -EINVAL;
3649 
3650 	if (get_compat_sigset(&s, uthese))
3651 		return -EFAULT;
3652 
3653 	if (uts) {
3654 		if (get_old_timespec32(&t, uts))
3655 			return -EFAULT;
3656 	}
3657 
3658 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3659 
3660 	if (ret > 0 && uinfo) {
3661 		if (copy_siginfo_to_user32(uinfo, &info))
3662 			ret = -EFAULT;
3663 	}
3664 
3665 	return ret;
3666 }
3667 #endif
3668 #endif
3669 
3670 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3671 {
3672 	clear_siginfo(info);
3673 	info->si_signo = sig;
3674 	info->si_errno = 0;
3675 	info->si_code = SI_USER;
3676 	info->si_pid = task_tgid_vnr(current);
3677 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3678 }
3679 
3680 /**
3681  *  sys_kill - send a signal to a process
3682  *  @pid: the PID of the process
3683  *  @sig: signal to be sent
3684  */
3685 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3686 {
3687 	struct kernel_siginfo info;
3688 
3689 	prepare_kill_siginfo(sig, &info);
3690 
3691 	return kill_something_info(sig, &info, pid);
3692 }
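
/*
 * Usage sketch (userspace, illustrative): pid > 0 signals one process,
 * pid == 0 the caller's process group, pid == -1 every process the
 * caller may signal, and pid < -1 the process group -pid. Signal 0
 * performs only the permission and existence checks.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	// Probe whether a process exists; EPERM still implies existence.
 *	int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */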
3693 
3694 /*
3695  * Verify that the signaler and signalee either are in the same pid namespace
3696  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3697  * namespace.
3698  */
3699 static bool access_pidfd_pidns(struct pid *pid)
3700 {
3701 	struct pid_namespace *active = task_active_pid_ns(current);
3702 	struct pid_namespace *p = ns_of_pid(pid);
3703 
3704 	for (;;) {
3705 		if (!p)
3706 			return false;
3707 		if (p == active)
3708 			break;
3709 		p = p->parent;
3710 	}
3711 
3712 	return true;
3713 }
3714 
3715 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3716 		siginfo_t __user *info)
3717 {
3718 #ifdef CONFIG_COMPAT
3719 	/*
3720 	 * Avoid hooking up compat syscalls and instead handle necessary
3721 	 * conversions here. Note, this is a stop-gap measure and should not be
3722 	 * considered a generic solution.
3723 	 */
3724 	if (in_compat_syscall())
3725 		return copy_siginfo_from_user32(
3726 			kinfo, (struct compat_siginfo __user *)info);
3727 #endif
3728 	return copy_siginfo_from_user(kinfo, info);
3729 }
3730 
3731 static struct pid *pidfd_to_pid(const struct file *file)
3732 {
3733 	struct pid *pid;
3734 
3735 	pid = pidfd_pid(file);
3736 	if (!IS_ERR(pid))
3737 		return pid;
3738 
3739 	return tgid_pidfd_to_pid(file);
3740 }
3741 
3742 /**
3743  * sys_pidfd_send_signal - Signal a process through a pidfd
3744  * @pidfd:  file descriptor of the process
3745  * @sig:    signal to send
3746  * @info:   signal info
3747  * @flags:  future flags
3748  *
3749  * The syscall currently only signals via PIDTYPE_PID, which covers
3750  * kill(<positive-pid>, <signal>). It does not signal threads or process
3751  * groups.
3752  * In order to extend the syscall to threads and process groups, the
3753  * @flags argument should be used. In essence, the @flags argument will
3754  * determine what is signaled, not the file descriptor itself. In other
3755  * words, grouping is a property of the flags argument, not a property
3756  * of the file descriptor.
3757  *
3758  * Return: 0 on success, negative errno on failure
3759  */
3760 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3761 		siginfo_t __user *, info, unsigned int, flags)
3762 {
3763 	int ret;
3764 	struct fd f;
3765 	struct pid *pid;
3766 	kernel_siginfo_t kinfo;
3767 
3768 	/* Enforce that flags is 0 until we add an extension. */
3769 	if (flags)
3770 		return -EINVAL;
3771 
3772 	f = fdget(pidfd);
3773 	if (!f.file)
3774 		return -EBADF;
3775 
3776 	/* Is this a pidfd? */
3777 	pid = pidfd_to_pid(f.file);
3778 	if (IS_ERR(pid)) {
3779 		ret = PTR_ERR(pid);
3780 		goto err;
3781 	}
3782 
3783 	ret = -EINVAL;
3784 	if (!access_pidfd_pidns(pid))
3785 		goto err;
3786 
3787 	if (info) {
3788 		ret = copy_siginfo_from_user_any(&kinfo, info);
3789 		if (unlikely(ret))
3790 			goto err;
3791 
3792 		ret = -EINVAL;
3793 		if (unlikely(sig != kinfo.si_signo))
3794 			goto err;
3795 
3796 		/* Only allow sending arbitrary signals to yourself. */
3797 		ret = -EPERM;
3798 		if ((task_pid(current) != pid) &&
3799 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3800 			goto err;
3801 	} else {
3802 		prepare_kill_siginfo(sig, &kinfo);
3803 	}
3804 
3805 	ret = kill_pid_info(sig, &kinfo, pid);
3806 
3807 err:
3808 	fdput(f);
3809 	return ret;
3810 }
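
/*
 * Usage sketch (userspace, illustrative; raw syscall(2) is used because
 * libc wrappers for the pidfd calls are not universally available, and
 * it assumes the SYS_pidfd_* numbers are defined by the toolchain
 * headers):
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int terminate_via_pidfd(pid_t pid)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// A NULL siginfo makes the kernel fill in SI_USER info,
 *		// matching kill() semantics.
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */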
3811 
3812 static int
3813 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3814 {
3815 	struct task_struct *p;
3816 	int error = -ESRCH;
3817 
3818 	rcu_read_lock();
3819 	p = find_task_by_vpid(pid);
3820 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3821 		error = check_kill_permission(sig, info, p);
3822 		/*
3823 		 * The null signal is a permissions and process existence
3824 		 * probe.  No signal is actually delivered.
3825 		 */
3826 		if (!error && sig) {
3827 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3828 			/*
3829 			 * If lock_task_sighand() failed we pretend the task
3830 			 * dies after receiving the signal. The window is tiny,
3831 			 * and the signal is private anyway.
3832 			 */
3833 			if (unlikely(error == -ESRCH))
3834 				error = 0;
3835 		}
3836 	}
3837 	rcu_read_unlock();
3838 
3839 	return error;
3840 }
3841 
3842 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3843 {
3844 	struct kernel_siginfo info;
3845 
3846 	clear_siginfo(&info);
3847 	info.si_signo = sig;
3848 	info.si_errno = 0;
3849 	info.si_code = SI_TKILL;
3850 	info.si_pid = task_tgid_vnr(current);
3851 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3852 
3853 	return do_send_specific(tgid, pid, sig, &info);
3854 }
3855 
3856 /**
3857  *  sys_tgkill - send signal to one specific thread
3858  *  @tgid: the thread group ID of the thread
3859  *  @pid: the PID of the thread
3860  *  @sig: signal to be sent
3861  *
3862  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3863  *  exists but no longer belongs to the target thread group. This check
3864  *  solves the problem of threads exiting and their PIDs being reused.
3865  */
3866 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3867 {
3868 	/* This is only valid for single tasks */
3869 	if (pid <= 0 || tgid <= 0)
3870 		return -EINVAL;
3871 
3872 	return do_tkill(tgid, pid, sig);
3873 }
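
/*
 * Usage sketch (userspace, illustrative; raw syscall(2) is used because
 * glibc only gained tgkill()/gettid() wrappers relatively recently):
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Signal one thread of the calling process; passing the tgid
 *	// guards against the tid having been recycled elsewhere.
 *	int signal_own_thread(pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, getpid(), tid, sig);
 *	}
 */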
3874 
3875 /**
3876  *  sys_tkill - send signal to one specific task
3877  *  @pid: the PID of the task
3878  *  @sig: signal to be sent
3879  *
3880  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3881  */
3882 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3883 {
3884 	/* This is only valid for single tasks */
3885 	if (pid <= 0)
3886 		return -EINVAL;
3887 
3888 	return do_tkill(0, pid, sig);
3889 }
3890 
3891 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3892 {
3893 	/* Not even root can pretend to send signals from the kernel.
3894 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3895 	 */
3896 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3897 	    (task_pid_vnr(current) != pid))
3898 		return -EPERM;
3899 
3900 	/* POSIX.1b doesn't mention process groups.  */
3901 	return kill_proc_info(sig, info, pid);
3902 }
3903 
3904 /**
3905  *  sys_rt_sigqueueinfo - queue a signal and its payload to a process
3906  *  @pid: the PID of the target process
3907  *  @sig: signal to be sent
3908  *  @uinfo: signal info to be sent
3909  */
3910 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3911 		siginfo_t __user *, uinfo)
3912 {
3913 	kernel_siginfo_t info;
3914 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3915 	if (unlikely(ret))
3916 		return ret;
3917 	return do_rt_sigqueueinfo(pid, sig, &info);
3918 }
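
/*
 * Usage sketch (userspace, illustrative): sigqueue(3) is the usual libc
 * entry point to this syscall; it sets si_code to SI_QUEUE and carries
 * the caller-supplied value to the receiver's si_value.
 *
 *	#include <signal.h>
 *
 *	int notify(pid_t pid, int payload)
 *	{
 *		union sigval v = { .sival_int = payload };
 *
 *		return sigqueue(pid, SIGRTMIN, v);
 *	}
 */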
3919 
3920 #ifdef CONFIG_COMPAT
3921 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3922 			compat_pid_t, pid,
3923 			int, sig,
3924 			struct compat_siginfo __user *, uinfo)
3925 {
3926 	kernel_siginfo_t info;
3927 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3928 	if (unlikely(ret))
3929 		return ret;
3930 	return do_rt_sigqueueinfo(pid, sig, &info);
3931 }
3932 #endif
3933 
3934 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3935 {
3936 	/* This is only valid for single tasks */
3937 	if (pid <= 0 || tgid <= 0)
3938 		return -EINVAL;
3939 
3940 	/* Not even root can pretend to send signals from the kernel.
3941 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3942 	 */
3943 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3944 	    (task_pid_vnr(current) != pid))
3945 		return -EPERM;
3946 
3947 	return do_send_specific(tgid, pid, sig, info);
3948 }
3949 
3950 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3951 		siginfo_t __user *, uinfo)
3952 {
3953 	kernel_siginfo_t info;
3954 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3955 	if (unlikely(ret))
3956 		return ret;
3957 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3958 }
3959 
3960 #ifdef CONFIG_COMPAT
3961 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3962 			compat_pid_t, tgid,
3963 			compat_pid_t, pid,
3964 			int, sig,
3965 			struct compat_siginfo __user *, uinfo)
3966 {
3967 	kernel_siginfo_t info;
3968 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3969 	if (unlikely(ret))
3970 		return ret;
3971 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3972 }
3973 #endif
3974 
3975 /*
3976  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3977  */
3978 void kernel_sigaction(int sig, __sighandler_t action)
3979 {
3980 	spin_lock_irq(&current->sighand->siglock);
3981 	current->sighand->action[sig - 1].sa.sa_handler = action;
3982 	if (action == SIG_IGN) {
3983 		sigset_t mask;
3984 
3985 		sigemptyset(&mask);
3986 		sigaddset(&mask, sig);
3987 
3988 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3989 		flush_sigqueue_mask(&mask, &current->pending);
3990 		recalc_sigpending();
3991 	}
3992 	spin_unlock_irq(&current->sighand->siglock);
3993 }
3994 EXPORT_SYMBOL(kernel_sigaction);
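
/*
 * In-kernel usage sketch (illustrative): the allow_signal() and
 * disallow_signal() helpers in <linux/sched/signal.h> are thin wrappers
 * around kernel_sigaction(). A kthread that wants to be interruptible
 * by SIGKILL might do the following, with do_work() standing in for the
 * thread's real loop body:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		do_work();
 *		if (signal_pending(current))
 *			break;
 *	}
 */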
3995 
3996 void __weak sigaction_compat_abi(struct k_sigaction *act,
3997 		struct k_sigaction *oact)
3998 {
3999 }
4000 
4001 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4002 {
4003 	struct task_struct *p = current, *t;
4004 	struct k_sigaction *k;
4005 	sigset_t mask;
4006 
4007 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4008 		return -EINVAL;
4009 
4010 	k = &p->sighand->action[sig-1];
4011 
4012 	spin_lock_irq(&p->sighand->siglock);
4013 	if (oact)
4014 		*oact = *k;
4015 
4016 	/*
4017 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4018 	 * e.g. by having an architecture use the bit in their uapi.
4019 	 */
4020 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4021 
4022 	/*
4023 	 * Clear unknown flag bits in order to allow userspace to detect missing
4024 	 * support for flag bits and to allow the kernel to use non-uapi bits
4025 	 * internally.
4026 	 */
4027 	if (act)
4028 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4029 	if (oact)
4030 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4031 
4032 	sigaction_compat_abi(act, oact);
4033 
4034 	if (act) {
4035 		sigdelsetmask(&act->sa.sa_mask,
4036 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4037 		*k = *act;
4038 		/*
4039 		 * POSIX 3.3.1.3:
4040 		 *  "Setting a signal action to SIG_IGN for a signal that is
4041 		 *   pending shall cause the pending signal to be discarded,
4042 		 *   whether or not it is blocked."
4043 		 *
4044 		 *  "Setting a signal action to SIG_DFL for a signal that is
4045 		 *   pending and whose default action is to ignore the signal
4046 		 *   (for example, SIGCHLD), shall cause the pending signal to
4047 		 *   be discarded, whether or not it is blocked"
4048 		 */
4049 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4050 			sigemptyset(&mask);
4051 			sigaddset(&mask, sig);
4052 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4053 			for_each_thread(p, t)
4054 				flush_sigqueue_mask(&mask, &t->pending);
4055 		}
4056 	}
4057 
4058 	spin_unlock_irq(&p->sighand->siglock);
4059 	return 0;
4060 }
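
/*
 * Usage sketch (userspace, illustrative): installing a handler with
 * sigaction(2). Because unknown sa_flags bits are cleared above,
 * userspace can probe for kernel support of a new flag by setting it
 * and reading it back.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_int(int sig)
 *	{
 *		(void)sig;	// async-signal-safe work only
 *	}
 *
 *	int install_handler(void)
 *	{
 *		struct sigaction sa, old;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = on_int;
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = SA_RESTART;
 *		if (sigaction(SIGINT, &sa, NULL))
 *			return -1;
 *		// A flag bit this kernel does not know comes back cleared.
 *		return sigaction(SIGINT, NULL, &old);
 *	}
 */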
4061 
4062 static int
4063 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4064 		size_t min_ss_size)
4065 {
4066 	struct task_struct *t = current;
4067 
4068 	if (oss) {
4069 		memset(oss, 0, sizeof(stack_t));
4070 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4071 		oss->ss_size = t->sas_ss_size;
4072 		oss->ss_flags = sas_ss_flags(sp) |
4073 			(current->sas_ss_flags & SS_FLAG_BITS);
4074 	}
4075 
4076 	if (ss) {
4077 		void __user *ss_sp = ss->ss_sp;
4078 		size_t ss_size = ss->ss_size;
4079 		unsigned ss_flags = ss->ss_flags;
4080 		int ss_mode;
4081 
4082 		if (unlikely(on_sig_stack(sp)))
4083 			return -EPERM;
4084 
4085 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4086 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4087 				ss_mode != 0))
4088 			return -EINVAL;
4089 
4090 		if (ss_mode == SS_DISABLE) {
4091 			ss_size = 0;
4092 			ss_sp = NULL;
4093 		} else {
4094 			if (unlikely(ss_size < min_ss_size))
4095 				return -ENOMEM;
4096 		}
4097 
4098 		t->sas_ss_sp = (unsigned long) ss_sp;
4099 		t->sas_ss_size = ss_size;
4100 		t->sas_ss_flags = ss_flags;
4101 	}
4102 	return 0;
4103 }
4104 
4105 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4106 {
4107 	stack_t new, old;
4108 	int err;
4109 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4110 		return -EFAULT;
4111 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4112 			      current_user_stack_pointer(),
4113 			      MINSIGSTKSZ);
4114 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4115 		err = -EFAULT;
4116 	return err;
4117 }
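
/*
 * Usage sketch (userspace, illustrative): an alternate stack is chiefly
 * useful with SA_ONSTACK, so a handler for a stack-overflow SIGSEGV
 * still has room to run.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	int setup_altstack(void)
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *
 *		if (!ss.ss_sp)
 *			return -1;
 *		return sigaltstack(&ss, NULL);
 *	}
 */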
4118 
4119 int restore_altstack(const stack_t __user *uss)
4120 {
4121 	stack_t new;
4122 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4123 		return -EFAULT;
4124 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4125 			     MINSIGSTKSZ);
4126 	/* squash all but EFAULT for now */
4127 	return 0;
4128 }
4129 
4130 int __save_altstack(stack_t __user *uss, unsigned long sp)
4131 {
4132 	struct task_struct *t = current;
4133 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4134 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4135 		__put_user(t->sas_ss_size, &uss->ss_size);
4136 	if (err)
4137 		return err;
4138 	if (t->sas_ss_flags & SS_AUTODISARM)
4139 		sas_ss_reset(t);
4140 	return 0;
4141 }
4142 
4143 #ifdef CONFIG_COMPAT
4144 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4145 				 compat_stack_t __user *uoss_ptr)
4146 {
4147 	stack_t uss, uoss;
4148 	int ret;
4149 
4150 	if (uss_ptr) {
4151 		compat_stack_t uss32;
4152 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4153 			return -EFAULT;
4154 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4155 		uss.ss_flags = uss32.ss_flags;
4156 		uss.ss_size = uss32.ss_size;
4157 	}
4158 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4159 			     compat_user_stack_pointer(),
4160 			     COMPAT_MINSIGSTKSZ);
4161 	if (ret >= 0 && uoss_ptr)  {
4162 		compat_stack_t old;
4163 		memset(&old, 0, sizeof(old));
4164 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4165 		old.ss_flags = uoss.ss_flags;
4166 		old.ss_size = uoss.ss_size;
4167 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4168 			ret = -EFAULT;
4169 	}
4170 	return ret;
4171 }
4172 
4173 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4174 			const compat_stack_t __user *, uss_ptr,
4175 			compat_stack_t __user *, uoss_ptr)
4176 {
4177 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4178 }
4179 
4180 int compat_restore_altstack(const compat_stack_t __user *uss)
4181 {
4182 	int err = do_compat_sigaltstack(uss, NULL);
4183 	/* squash all but -EFAULT for now */
4184 	return err == -EFAULT ? err : 0;
4185 }
4186 
4187 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4188 {
4189 	int err;
4190 	struct task_struct *t = current;
4191 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4192 			 &uss->ss_sp) |
4193 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4194 		__put_user(t->sas_ss_size, &uss->ss_size);
4195 	if (err)
4196 		return err;
4197 	if (t->sas_ss_flags & SS_AUTODISARM)
4198 		sas_ss_reset(t);
4199 	return 0;
4200 }
4201 #endif
4202 
4203 #ifdef __ARCH_WANT_SYS_SIGPENDING
4204 
4205 /**
4206  *  sys_sigpending - examine pending signals
4207  *  @uset: where the mask of pending signals is returned
4208  */
4209 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4210 {
4211 	sigset_t set;
4212 
4213 	if (sizeof(old_sigset_t) > sizeof(*uset))
4214 		return -EINVAL;
4215 
4216 	do_sigpending(&set);
4217 
4218 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4219 		return -EFAULT;
4220 
4221 	return 0;
4222 }
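
/*
 * Usage sketch (userspace, illustrative; libc's sigpending(3) normally
 * maps to rt_sigpending, but the semantics match): inspect signals that
 * arrived while blocked, without consuming them.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	void report_pending(void)
 *	{
 *		sigset_t pending;
 *
 *		if (sigpending(&pending) == 0 &&
 *		    sigismember(&pending, SIGINT))
 *			printf("SIGINT is pending\n");
 *	}
 */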
4223 
4224 #ifdef CONFIG_COMPAT
4225 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4226 {
4227 	sigset_t set;
4228 
4229 	do_sigpending(&set);
4230 
4231 	return put_user(set.sig[0], set32);
4232 }
4233 #endif
4234 
4235 #endif
4236 
4237 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4238 /**
4239  *  sys_sigprocmask - examine and change blocked signals
4240  *  @how: whether to add, remove, or set signals
4241  *  @nset: signals to add or remove (if non-null)
4242  *  @oset: previous value of signal mask if non-null
4243  *
4244  * Some platforms have their own version with special arguments;
4245  * others support only sys_rt_sigprocmask.
4246  */
4247 
4248 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4249 		old_sigset_t __user *, oset)
4250 {
4251 	old_sigset_t old_set, new_set;
4252 	sigset_t new_blocked;
4253 
4254 	old_set = current->blocked.sig[0];
4255 
4256 	if (nset) {
4257 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4258 			return -EFAULT;
4259 
4260 		new_blocked = current->blocked;
4261 
4262 		switch (how) {
4263 		case SIG_BLOCK:
4264 			sigaddsetmask(&new_blocked, new_set);
4265 			break;
4266 		case SIG_UNBLOCK:
4267 			sigdelsetmask(&new_blocked, new_set);
4268 			break;
4269 		case SIG_SETMASK:
4270 			new_blocked.sig[0] = new_set;
4271 			break;
4272 		default:
4273 			return -EINVAL;
4274 		}
4275 
4276 		set_current_blocked(&new_blocked);
4277 	}
4278 
4279 	if (oset) {
4280 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4281 			return -EFAULT;
4282 	}
4283 
4284 	return 0;
4285 }
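
/*
 * Usage sketch (userspace, illustrative; libc's sigprocmask(3) normally
 * maps to rt_sigprocmask, with the same semantics for these low
 * signals): the classic block/restore bracket around a critical
 * section.
 *
 *	#include <signal.h>
 *
 *	void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);
 *		fn();
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore
 *	}
 */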
4286 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4287 
4288 #ifndef CONFIG_ODD_RT_SIGACTION
4289 /**
4290  *  sys_rt_sigaction - alter an action taken by a process
4291  *  @sig: signal whose action is to be changed
4292  *  @act: new sigaction
4293  *  @oact: used to save the previous sigaction
4294  *  @sigsetsize: size of sigset_t type
4295  */
4296 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4297 		const struct sigaction __user *, act,
4298 		struct sigaction __user *, oact,
4299 		size_t, sigsetsize)
4300 {
4301 	struct k_sigaction new_sa, old_sa;
4302 	int ret;
4303 
4304 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4305 	if (sigsetsize != sizeof(sigset_t))
4306 		return -EINVAL;
4307 
4308 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4309 		return -EFAULT;
4310 
4311 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4312 	if (ret)
4313 		return ret;
4314 
4315 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4316 		return -EFAULT;
4317 
4318 	return 0;
4319 }
4320 #ifdef CONFIG_COMPAT
4321 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4322 		const struct compat_sigaction __user *, act,
4323 		struct compat_sigaction __user *, oact,
4324 		compat_size_t, sigsetsize)
4325 {
4326 	struct k_sigaction new_ka, old_ka;
4327 #ifdef __ARCH_HAS_SA_RESTORER
4328 	compat_uptr_t restorer;
4329 #endif
4330 	int ret;
4331 
4332 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4333 	if (sigsetsize != sizeof(compat_sigset_t))
4334 		return -EINVAL;
4335 
4336 	if (act) {
4337 		compat_uptr_t handler;
4338 		ret = get_user(handler, &act->sa_handler);
4339 		new_ka.sa.sa_handler = compat_ptr(handler);
4340 #ifdef __ARCH_HAS_SA_RESTORER
4341 		ret |= get_user(restorer, &act->sa_restorer);
4342 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4343 #endif
4344 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4345 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4346 		if (ret)
4347 			return -EFAULT;
4348 	}
4349 
4350 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4351 	if (!ret && oact) {
4352 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4353 			       &oact->sa_handler);
4354 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4355 					 sizeof(oact->sa_mask));
4356 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4357 #ifdef __ARCH_HAS_SA_RESTORER
4358 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4359 				&oact->sa_restorer);
4360 #endif
4361 	}
4362 	return ret;
4363 }
4364 #endif
4365 #endif /* !CONFIG_ODD_RT_SIGACTION */
4366 
4367 #ifdef CONFIG_OLD_SIGACTION
4368 SYSCALL_DEFINE3(sigaction, int, sig,
4369 		const struct old_sigaction __user *, act,
4370 	        struct old_sigaction __user *, oact)
4371 {
4372 	struct k_sigaction new_ka, old_ka;
4373 	int ret;
4374 
4375 	if (act) {
4376 		old_sigset_t mask;
4377 		if (!access_ok(act, sizeof(*act)) ||
4378 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4379 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4380 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4381 		    __get_user(mask, &act->sa_mask))
4382 			return -EFAULT;
4383 #ifdef __ARCH_HAS_KA_RESTORER
4384 		new_ka.ka_restorer = NULL;
4385 #endif
4386 		siginitset(&new_ka.sa.sa_mask, mask);
4387 	}
4388 
4389 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4390 
4391 	if (!ret && oact) {
4392 		if (!access_ok(oact, sizeof(*oact)) ||
4393 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4394 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4395 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4396 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4397 			return -EFAULT;
4398 	}
4399 
4400 	return ret;
4401 }
4402 #endif
4403 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4404 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4405 		const struct compat_old_sigaction __user *, act,
4406 	        struct compat_old_sigaction __user *, oact)
4407 {
4408 	struct k_sigaction new_ka, old_ka;
4409 	int ret;
4410 	compat_old_sigset_t mask;
4411 	compat_uptr_t handler, restorer;
4412 
4413 	if (act) {
4414 		if (!access_ok(act, sizeof(*act)) ||
4415 		    __get_user(handler, &act->sa_handler) ||
4416 		    __get_user(restorer, &act->sa_restorer) ||
4417 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4418 		    __get_user(mask, &act->sa_mask))
4419 			return -EFAULT;
4420 
4421 #ifdef __ARCH_HAS_KA_RESTORER
4422 		new_ka.ka_restorer = NULL;
4423 #endif
4424 		new_ka.sa.sa_handler = compat_ptr(handler);
4425 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4426 		siginitset(&new_ka.sa.sa_mask, mask);
4427 	}
4428 
4429 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4430 
4431 	if (!ret && oact) {
4432 		if (!access_ok(oact, sizeof(*oact)) ||
4433 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4434 			       &oact->sa_handler) ||
4435 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4436 			       &oact->sa_restorer) ||
4437 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4438 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4439 			return -EFAULT;
4440 	}
4441 	return ret;
4442 }
4443 #endif
4444 
4445 #ifdef CONFIG_SGETMASK_SYSCALL
4446 
4447 /*
4448  * For backwards compatibility.  Functionality superseded by sigprocmask.
4449  */
4450 SYSCALL_DEFINE0(sgetmask)
4451 {
4452 	/* SMP safe */
4453 	return current->blocked.sig[0];
4454 }
4455 
4456 SYSCALL_DEFINE1(ssetmask, int, newmask)
4457 {
4458 	int old = current->blocked.sig[0];
4459 	sigset_t newset;
4460 
4461 	siginitset(&newset, newmask);
4462 	set_current_blocked(&newset);
4463 
4464 	return old;
4465 }
4466 #endif /* CONFIG_SGETMASK_SYSCALL */
4467 
4468 #ifdef __ARCH_WANT_SYS_SIGNAL
4469 /*
4470  * For backwards compatibility.  Functionality superseded by sigaction.
4471  */
4472 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4473 {
4474 	struct k_sigaction new_sa, old_sa;
4475 	int ret;
4476 
4477 	new_sa.sa.sa_handler = handler;
4478 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4479 	sigemptyset(&new_sa.sa.sa_mask);
4480 
4481 	ret = do_sigaction(sig, &new_sa, &old_sa);
4482 
4483 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4484 }
4485 #endif /* __ARCH_WANT_SYS_SIGNAL */
4486 
4487 #ifdef __ARCH_WANT_SYS_PAUSE
4488 
4489 SYSCALL_DEFINE0(pause)
4490 {
4491 	while (!signal_pending(current)) {
4492 		__set_current_state(TASK_INTERRUPTIBLE);
4493 		schedule();
4494 	}
4495 	return -ERESTARTNOHAND;
4496 }
4497 
4498 #endif
4499 
4500 static int sigsuspend(sigset_t *set)
4501 {
4502 	current->saved_sigmask = current->blocked;
4503 	set_current_blocked(set);
4504 
4505 	while (!signal_pending(current)) {
4506 		__set_current_state(TASK_INTERRUPTIBLE);
4507 		schedule();
4508 	}
4509 	set_restore_sigmask();
4510 	return -ERESTARTNOHAND;
4511 }
4512 
4513 /**
4514  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
4515  *	until a signal is received
4516  *  @unewset: new signal mask value
4517  *  @sigsetsize: size of sigset_t type
4518  */
4519 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4520 {
4521 	sigset_t newset;
4522 
4523 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4524 	if (sigsetsize != sizeof(sigset_t))
4525 		return -EINVAL;
4526 
4527 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4528 		return -EFAULT;
4529 	return sigsuspend(&newset);
4530 }
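
/*
 * Usage sketch (userspace, illustrative): the canonical race-free wait.
 * The signal stays blocked while the flag is tested; sigsuspend()
 * atomically installs the given mask and sleeps, so a signal arriving
 * between the test and the sleep cannot be lost.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void on_usr1(int sig)
 *	{
 *		(void)sig;
 *		got_usr1 = 1;
 *	}
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// returns -1 with errno EINTR
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */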
4531 
4532 #ifdef CONFIG_COMPAT
4533 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4534 {
4535 	sigset_t newset;
4536 
4537 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4538 	if (sigsetsize != sizeof(sigset_t))
4539 		return -EINVAL;
4540 
4541 	if (get_compat_sigset(&newset, unewset))
4542 		return -EFAULT;
4543 	return sigsuspend(&newset);
4544 }
4545 #endif
4546 
4547 #ifdef CONFIG_OLD_SIGSUSPEND
4548 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4549 {
4550 	sigset_t blocked;
4551 	siginitset(&blocked, mask);
4552 	return sigsuspend(&blocked);
4553 }
4554 #endif
4555 #ifdef CONFIG_OLD_SIGSUSPEND3
4556 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4557 {
4558 	sigset_t blocked;
4559 	siginitset(&blocked, mask);
4560 	return sigsuspend(&blocked);
4561 }
4562 #endif
4563 
4564 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4565 {
4566 	return NULL;
4567 }
4568 
4569 static inline void siginfo_buildtime_checks(void)
4570 {
4571 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4572 
4573 	/* Verify the offsets in the two siginfos match */
4574 #define CHECK_OFFSET(field) \
4575 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4576 
4577 	/* kill */
4578 	CHECK_OFFSET(si_pid);
4579 	CHECK_OFFSET(si_uid);
4580 
4581 	/* timer */
4582 	CHECK_OFFSET(si_tid);
4583 	CHECK_OFFSET(si_overrun);
4584 	CHECK_OFFSET(si_value);
4585 
4586 	/* rt */
4587 	CHECK_OFFSET(si_pid);
4588 	CHECK_OFFSET(si_uid);
4589 	CHECK_OFFSET(si_value);
4590 
4591 	/* sigchld */
4592 	CHECK_OFFSET(si_pid);
4593 	CHECK_OFFSET(si_uid);
4594 	CHECK_OFFSET(si_status);
4595 	CHECK_OFFSET(si_utime);
4596 	CHECK_OFFSET(si_stime);
4597 
4598 	/* sigfault */
4599 	CHECK_OFFSET(si_addr);
4600 	CHECK_OFFSET(si_addr_lsb);
4601 	CHECK_OFFSET(si_lower);
4602 	CHECK_OFFSET(si_upper);
4603 	CHECK_OFFSET(si_pkey);
4604 
4605 	/* sigpoll */
4606 	CHECK_OFFSET(si_band);
4607 	CHECK_OFFSET(si_fd);
4608 
4609 	/* sigsys */
4610 	CHECK_OFFSET(si_call_addr);
4611 	CHECK_OFFSET(si_syscall);
4612 	CHECK_OFFSET(si_arch);
4613 #undef CHECK_OFFSET
4614 
4615 	/* usb asyncio */
4616 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4617 		     offsetof(struct siginfo, si_addr));
4618 	if (sizeof(int) == sizeof(void __user *)) {
4619 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4620 			     sizeof(void __user *));
4621 	} else {
4622 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4623 			      sizeof_field(struct siginfo, si_uid)) !=
4624 			     sizeof(void __user *));
4625 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4626 			     offsetof(struct siginfo, si_uid));
4627 	}
4628 #ifdef CONFIG_COMPAT
4629 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4630 		     offsetof(struct compat_siginfo, si_addr));
4631 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4632 		     sizeof(compat_uptr_t));
4633 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4634 		     sizeof_field(struct siginfo, si_pid));
4635 #endif
4636 }
4637 
4638 void __init signals_init(void)
4639 {
4640 	siginfo_buildtime_checks();
4641 
4642 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4643 }
4644 
4645 #ifdef CONFIG_KGDB_KDB
4646 #include <linux/kdb.h>
4647 /*
4648  * kdb_send_sig - Allows kdb to send signals without exposing
4649  * signal internals.  This function checks if the required locks are
4650  * available before calling the main signal code, to avoid kdb
4651  * deadlocks.
4652  */
4653 void kdb_send_sig(struct task_struct *t, int sig)
4654 {
4655 	static struct task_struct *kdb_prev_t;
4656 	int new_t, ret;
4657 	if (!spin_trylock(&t->sighand->siglock)) {
4658 		kdb_printf("Can't do kill command now.\n"
4659 			   "The sigmask lock is held somewhere else in "
4660 			   "the kernel; try again later\n");
4661 		return;
4662 	}
4663 	new_t = kdb_prev_t != t;
4664 	kdb_prev_t = t;
4665 	if (t->state != TASK_RUNNING && new_t) {
4666 		spin_unlock(&t->sighand->siglock);
4667 		kdb_printf("Process is not RUNNING, sending a signal from "
4668 			   "kdb risks deadlock\n"
4669 			   "on the run queue locks. "
4670 			   "The signal has _not_ been sent.\n"
4671 			   "Reissue the kill command if you want to risk "
4672 			   "the deadlock.\n");
4673 		return;
4674 	}
4675 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4676 	spin_unlock(&t->sighand->siglock);
4677 	if (ret)
4678 		kdb_printf("Failed to deliver signal %d to process %d.\n",
4679 			   sig, t->pid);
4680 	else
4681 		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4682 }
4683 #endif	/* CONFIG_KGDB_KDB */
4684