1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *		Changes to use preallocated sigqueue structures
11  *		to allow signals to be sent reliably.
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/tracehook.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/cgroup.h>
47 #include <linux/audit.h>
48 
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/signal.h>
51 
52 #include <asm/param.h>
53 #include <linux/uaccess.h>
54 #include <asm/unistd.h>
55 #include <asm/siginfo.h>
56 #include <asm/cacheflush.h>
57 #include <asm/syscall.h>	/* for syscall_get_* */
58 
59 /*
60  * SLAB caches for signal bits.
61  */
62 
63 static struct kmem_cache *sigqueue_cachep;
64 
65 int print_fatal_signals __read_mostly;
66 
67 static void __user *sig_handler(struct task_struct *t, int sig)
68 {
69 	return t->sighand->action[sig - 1].sa.sa_handler;
70 }
71 
72 static inline bool sig_handler_ignored(void __user *handler, int sig)
73 {
74 	/* Is it explicitly or implicitly ignored? */
75 	return handler == SIG_IGN ||
76 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
77 }
78 
79 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80 {
81 	void __user *handler;
82 
83 	handler = sig_handler(t, sig);
84 
85 	/* SIGKILL and SIGSTOP may not be sent to the global init */
86 	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 		return true;
88 
89 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 		return true;
92 
93 	/* Only allow kernel generated signals to this kthread */
94 	if (unlikely((t->flags & PF_KTHREAD) &&
95 		     (handler == SIG_KTHREAD_KERNEL) && !force))
96 		return true;
97 
98 	return sig_handler_ignored(handler, sig);
99 }
100 
101 static bool sig_ignored(struct task_struct *t, int sig, bool force)
102 {
103 	/*
104 	 * Blocked signals are never ignored, since the
105 	 * signal handler may change by the time it is
106 	 * unblocked.
107 	 */
108 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 		return false;
110 
111 	/*
112 	 * Tracers may want to know about even ignored signals, unless it
113 	 * is SIGKILL, which can't be reported anyway but can be ignored
114 	 * by a SIGNAL_UNKILLABLE task.
115 	 */
116 	if (t->ptrace && sig != SIGKILL)
117 		return false;
118 
119 	return sig_task_ignored(t, sig, force);
120 }
121 
122 /*
123  * Re-calculate pending state from the set of locally pending
124  * signals, globally pending signals, and blocked signals.
125  */
126 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127 {
128 	unsigned long ready;
129 	long i;
130 
131 	switch (_NSIG_WORDS) {
132 	default:
133 		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 			ready |= signal->sig[i] &~ blocked->sig[i];
135 		break;
136 
137 	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
138 		ready |= signal->sig[2] &~ blocked->sig[2];
139 		ready |= signal->sig[1] &~ blocked->sig[1];
140 		ready |= signal->sig[0] &~ blocked->sig[0];
141 		break;
142 
143 	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
144 		ready |= signal->sig[0] &~ blocked->sig[0];
145 		break;
146 
147 	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
148 	}
149 	return ready !=	0;
150 }
151 
152 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
153 
154 static bool recalc_sigpending_tsk(struct task_struct *t)
155 {
156 	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 	    PENDING(&t->pending, &t->blocked) ||
158 	    PENDING(&t->signal->shared_pending, &t->blocked) ||
159 	    cgroup_task_frozen(t)) {
160 		set_tsk_thread_flag(t, TIF_SIGPENDING);
161 		return true;
162 	}
163 
164 	/*
165 	 * We must never clear the flag in another thread, or in current
166 	 * when it's possible the current syscall is returning -ERESTART*.
167 	 * So we don't clear it here; it is only cleared by callers who know they should.
168 	 */
169 	return false;
170 }
171 
172 /*
173  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174  * This is superfluous when called on current; the wakeup is a harmless no-op.
175  */
176 void recalc_sigpending_and_wake(struct task_struct *t)
177 {
178 	if (recalc_sigpending_tsk(t))
179 		signal_wake_up(t, 0);
180 }
181 
182 void recalc_sigpending(void)
183 {
184 	if (!recalc_sigpending_tsk(current) && !freezing(current))
185 		clear_thread_flag(TIF_SIGPENDING);
186 
187 }
188 EXPORT_SYMBOL(recalc_sigpending);
189 
190 void calculate_sigpending(void)
191 {
192 	/* Have any signals or users of TIF_SIGPENDING been delayed
193 	 * until after fork?
194 	 */
195 	spin_lock_irq(&current->sighand->siglock);
196 	set_tsk_thread_flag(current, TIF_SIGPENDING);
197 	recalc_sigpending();
198 	spin_unlock_irq(&current->sighand->siglock);
199 }
200 
201 /* Given the mask, find the first available signal that should be serviced. */
202 
203 #define SYNCHRONOUS_MASK \
204 	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
205 	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
206 
207 int next_signal(struct sigpending *pending, sigset_t *mask)
208 {
209 	unsigned long i, *s, *m, x;
210 	int sig = 0;
211 
212 	s = pending->signal.sig;
213 	m = mask->sig;
214 
215 	/*
216 	 * Handle the first word specially: it contains the
217 	 * synchronous signals that need to be dequeued first.
218 	 */
219 	x = *s &~ *m;
220 	if (x) {
221 		if (x & SYNCHRONOUS_MASK)
222 			x &= SYNCHRONOUS_MASK;
223 		sig = ffz(~x) + 1;
224 		return sig;
225 	}
226 
227 	switch (_NSIG_WORDS) {
228 	default:
229 		for (i = 1; i < _NSIG_WORDS; ++i) {
230 			x = *++s &~ *++m;
231 			if (!x)
232 				continue;
233 			sig = ffz(~x) + i*_NSIG_BPW + 1;
234 			break;
235 		}
236 		break;
237 
238 	case 2:
239 		x = s[1] &~ m[1];
240 		if (!x)
241 			break;
242 		sig = ffz(~x) + _NSIG_BPW + 1;
243 		break;
244 
245 	case 1:
246 		/* Nothing to do */
247 		break;
248 	}
249 
250 	return sig;
251 }
252 
253 static inline void print_dropped_signal(int sig)
254 {
255 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
256 
257 	if (!print_fatal_signals)
258 		return;
259 
260 	if (!__ratelimit(&ratelimit_state))
261 		return;
262 
263 	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
264 				current->comm, current->pid, sig);
265 }
266 
267 /**
268  * task_set_jobctl_pending - set jobctl pending bits
269  * @task: target task
270  * @mask: pending bits to set
271  *
272  * Set @mask in @task->jobctl.  @mask must be a subset of
273  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
274  * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
275  * cleared.  If @task is already being killed or exiting, this function
276  * becomes a no-op.
277  *
278  * CONTEXT:
279  * Must be called with @task->sighand->siglock held.
280  *
281  * RETURNS:
282  * %true if @mask is set, %false if made a no-op because @task was dying.
283  */
284 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
285 {
286 	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
287 			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
288 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
289 
290 	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
291 		return false;
292 
293 	if (mask & JOBCTL_STOP_SIGMASK)
294 		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
295 
296 	task->jobctl |= mask;
297 	return true;
298 }
299 
300 /**
301  * task_clear_jobctl_trapping - clear jobctl trapping bit
302  * @task: target task
303  *
304  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
305  * Clear it and wake up the ptracer.  Note that we don't need any further
306  * locking.  @task->siglock guarantees that @task->parent points to the
307  * ptracer.
308  *
309  * CONTEXT:
310  * Must be called with @task->sighand->siglock held.
311  */
312 void task_clear_jobctl_trapping(struct task_struct *task)
313 {
314 	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
315 		task->jobctl &= ~JOBCTL_TRAPPING;
316 		smp_mb();	/* advised by wake_up_bit() */
317 		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
318 	}
319 }
320 
321 /**
322  * task_clear_jobctl_pending - clear jobctl pending bits
323  * @task: target task
324  * @mask: pending bits to clear
325  *
326  * Clear @mask from @task->jobctl.  @mask must be a subset of
327  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
328  * STOP bits are cleared together.
329  *
330  * If clearing of @mask leaves no stop or trap pending, this function calls
331  * task_clear_jobctl_trapping().
332  *
333  * CONTEXT:
334  * Must be called with @task->sighand->siglock held.
335  */
336 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
337 {
338 	BUG_ON(mask & ~JOBCTL_PENDING_MASK);
339 
340 	if (mask & JOBCTL_STOP_PENDING)
341 		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
342 
343 	task->jobctl &= ~mask;
344 
345 	if (!(task->jobctl & JOBCTL_PENDING_MASK))
346 		task_clear_jobctl_trapping(task);
347 }
348 
349 /**
350  * task_participate_group_stop - participate in a group stop
351  * @task: task participating in a group stop
352  *
353  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
354  * Group stop states are cleared and the group stop count is consumed if
355  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
356  * stop, the appropriate `SIGNAL_*` flags are set.
357  *
358  * CONTEXT:
359  * Must be called with @task->sighand->siglock held.
360  *
361  * RETURNS:
362  * %true if group stop completion should be notified to the parent, %false
363  * otherwise.
364  */
365 static bool task_participate_group_stop(struct task_struct *task)
366 {
367 	struct signal_struct *sig = task->signal;
368 	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
369 
370 	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
371 
372 	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
373 
374 	if (!consume)
375 		return false;
376 
377 	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
378 		sig->group_stop_count--;
379 
380 	/*
381 	 * Tell the caller to notify completion iff we are entering into a
382 	 * fresh group stop.  Read comment in do_signal_stop() for details.
383 	 */
384 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
385 		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
386 		return true;
387 	}
388 	return false;
389 }
390 
391 void task_join_group_stop(struct task_struct *task)
392 {
393 	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
394 	struct signal_struct *sig = current->signal;
395 
396 	if (sig->group_stop_count) {
397 		sig->group_stop_count++;
398 		mask |= JOBCTL_STOP_CONSUME;
399 	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
400 		return;
401 
402 	/* Have the new thread join an on-going signal group stop */
403 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
404 }
405 
406 /*
407  * allocate a new signal queue record
408  * - this may be called without locks if and only if t == current, otherwise an
409  *   appropriate lock must be held to stop the target task from exiting
410  */
411 static struct sigqueue *
412 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
413 		 int override_rlimit, const unsigned int sigqueue_flags)
414 {
415 	struct sigqueue *q = NULL;
416 	struct ucounts *ucounts = NULL;
417 	long sigpending;
418 
419 	/*
420 	 * Protect access to @t credentials. This can go away when all
421 	 * callers hold rcu read lock.
422 	 *
423 	 * NOTE! A pending signal will hold on to the user refcount,
424 	 * and we get/put the refcount only when the sigpending count
425 	 * changes from/to zero.
426 	 */
427 	rcu_read_lock();
428 	ucounts = task_ucounts(t);
429 	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
430 	switch (sigpending) {
431 	case 1:
432 		if (likely(get_ucounts(ucounts)))
433 			break;
434 		fallthrough;
435 	case LONG_MAX:
436 		/*
437 		 * we need to decrease the ucount in the userns tree on any
438 		 * failure to avoid counts leaking.
439 		 */
440 		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
441 		rcu_read_unlock();
442 		return NULL;
443 	}
444 	rcu_read_unlock();
445 
446 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
447 		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
448 	} else {
449 		print_dropped_signal(sig);
450 	}
451 
452 	if (unlikely(q == NULL)) {
453 		if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
454 			put_ucounts(ucounts);
455 	} else {
456 		INIT_LIST_HEAD(&q->list);
457 		q->flags = sigqueue_flags;
458 		q->ucounts = ucounts;
459 	}
460 	return q;
461 }
462 
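/*
 * Free one queued entry, dropping its RLIMIT_SIGPENDING accounting.
 * Preallocated (POSIX timer) entries are left untouched.
 */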
463 static void __sigqueue_free(struct sigqueue *q)
464 {
465 	if (q->flags & SIGQUEUE_PREALLOC)
466 		return;
467 	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
468 		put_ucounts(q->ucounts);
469 		q->ucounts = NULL;
470 	}
471 	kmem_cache_free(sigqueue_cachep, q);
472 }
473 
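/*
 * Drop every entry queued on @queue and clear its pending set.
 */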
474 void flush_sigqueue(struct sigpending *queue)
475 {
476 	struct sigqueue *q;
477 
478 	sigemptyset(&queue->signal);
479 	while (!list_empty(&queue->list)) {
480 		q = list_entry(queue->list.next, struct sigqueue , list);
481 		list_del_init(&q->list);
482 		__sigqueue_free(q);
483 	}
484 }
485 
486 /*
487  * Flush all pending signals for this kthread.
488  */
489 void flush_signals(struct task_struct *t)
490 {
491 	unsigned long flags;
492 
493 	spin_lock_irqsave(&t->sighand->siglock, flags);
494 	clear_tsk_thread_flag(t, TIF_SIGPENDING);
495 	flush_sigqueue(&t->pending);
496 	flush_sigqueue(&t->signal->shared_pending);
497 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
498 }
499 EXPORT_SYMBOL(flush_signals);
500 
501 #ifdef CONFIG_POSIX_TIMERS
502 static void __flush_itimer_signals(struct sigpending *pending)
503 {
504 	sigset_t signal, retain;
505 	struct sigqueue *q, *n;
506 
507 	signal = pending->signal;
508 	sigemptyset(&retain);
509 
510 	list_for_each_entry_safe(q, n, &pending->list, list) {
511 		int sig = q->info.si_signo;
512 
513 		if (likely(q->info.si_code != SI_TIMER)) {
514 			sigaddset(&retain, sig);
515 		} else {
516 			sigdelset(&signal, sig);
517 			list_del_init(&q->list);
518 			__sigqueue_free(q);
519 		}
520 	}
521 
522 	sigorsets(&pending->signal, &signal, &retain);
523 }
524 
525 void flush_itimer_signals(void)
526 {
527 	struct task_struct *tsk = current;
528 	unsigned long flags;
529 
530 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
531 	__flush_itimer_signals(&tsk->pending);
532 	__flush_itimer_signals(&tsk->signal->shared_pending);
533 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
534 }
535 #endif
536 
537 void ignore_signals(struct task_struct *t)
538 {
539 	int i;
540 
541 	for (i = 0; i < _NSIG; ++i)
542 		t->sighand->action[i].sa.sa_handler = SIG_IGN;
543 
544 	flush_signals(t);
545 }
546 
547 /*
548  * Flush all handlers for a task.
549  */
550 
551 void
552 flush_signal_handlers(struct task_struct *t, int force_default)
553 {
554 	int i;
555 	struct k_sigaction *ka = &t->sighand->action[0];
556 	for (i = _NSIG ; i != 0 ; i--) {
557 		if (force_default || ka->sa.sa_handler != SIG_IGN)
558 			ka->sa.sa_handler = SIG_DFL;
559 		ka->sa.sa_flags = 0;
560 #ifdef __ARCH_HAS_SA_RESTORER
561 		ka->sa.sa_restorer = NULL;
562 #endif
563 		sigemptyset(&ka->sa.sa_mask);
564 		ka++;
565 	}
566 }
567 
568 bool unhandled_signal(struct task_struct *tsk, int sig)
569 {
570 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
571 	if (is_global_init(tsk))
572 		return true;
573 
574 	if (handler != SIG_IGN && handler != SIG_DFL)
575 		return false;
576 
577 	/* if ptraced, let the tracer determine */
578 	return !tsk->ptrace;
579 }
580 
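/*
 * Remove one queued instance of @sig from @list and copy its siginfo
 * into @info, or synthesize a bare SI_USER siginfo if nothing was queued.
 */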
581 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
582 			   bool *resched_timer)
583 {
584 	struct sigqueue *q, *first = NULL;
585 
586 	/*
587 	 * Collect the siginfo appropriate to this signal.  Check if
588 	 * there is another siginfo for the same signal.
589 	 */
590 	list_for_each_entry(q, &list->list, list) {
591 		if (q->info.si_signo == sig) {
592 			if (first)
593 				goto still_pending;
594 			first = q;
595 		}
596 	}
597 
598 	sigdelset(&list->signal, sig);
599 
600 	if (first) {
601 still_pending:
602 		list_del_init(&first->list);
603 		copy_siginfo(info, &first->info);
604 
605 		*resched_timer =
606 			(first->flags & SIGQUEUE_PREALLOC) &&
607 			(info->si_code == SI_TIMER) &&
608 			(info->si_sys_private);
609 
610 		__sigqueue_free(first);
611 	} else {
612 		/*
613 		 * Ok, it wasn't in the queue.  This must be
614 		 * a fast-pathed signal or we must have been
615 		 * out of queue space.  So zero out the info.
616 		 */
617 		clear_siginfo(info);
618 		info->si_signo = sig;
619 		info->si_errno = 0;
620 		info->si_code = SI_USER;
621 		info->si_pid = 0;
622 		info->si_uid = 0;
623 	}
624 }
625 
626 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
627 			kernel_siginfo_t *info, bool *resched_timer)
628 {
629 	int sig = next_signal(pending, mask);
630 
631 	if (sig)
632 		collect_signal(sig, pending, info, resched_timer);
633 	return sig;
634 }
635 
636 /*
637  * Dequeue a signal and return the element to the caller, which is
638  * expected to free it.
639  *
640  * All callers have to hold the siglock.
641  */
642 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
643 {
644 	bool resched_timer = false;
645 	int signr;
646 
647 	/* We only dequeue private signals from ourselves, we don't let
648 	 * signalfd steal them
649 	 */
650 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
651 	if (!signr) {
652 		signr = __dequeue_signal(&tsk->signal->shared_pending,
653 					 mask, info, &resched_timer);
654 #ifdef CONFIG_POSIX_TIMERS
655 		/*
656 		 * itimer signal ?
657 		 *
658 		 * itimers are process shared and we restart periodic
659 		 * itimers in the signal delivery path to prevent DoS
660 		 * attacks in the high resolution timer case. This is
661 		 * compliant with the old way of self-restarting
662 		 * itimers, as the SIGALRM is a legacy signal and only
663 		 * queued once. Changing the restart behaviour to
664 		 * restart the timer in the signal dequeue path is
665 		 * reducing the timer noise on heavily loaded !highres
666 		 * systems too.
667 		 */
668 		if (unlikely(signr == SIGALRM)) {
669 			struct hrtimer *tmr = &tsk->signal->real_timer;
670 
671 			if (!hrtimer_is_queued(tmr) &&
672 			    tsk->signal->it_real_incr != 0) {
673 				hrtimer_forward(tmr, tmr->base->get_time(),
674 						tsk->signal->it_real_incr);
675 				hrtimer_restart(tmr);
676 			}
677 		}
678 #endif
679 	}
680 
681 	recalc_sigpending();
682 	if (!signr)
683 		return 0;
684 
685 	if (unlikely(sig_kernel_stop(signr))) {
686 		/*
687 		 * Set a marker that we have dequeued a stop signal.  Our
688 		 * caller might release the siglock and then the pending
689 		 * stop signal it is about to process is no longer in the
690 		 * pending bitmasks, but must still be cleared by a SIGCONT
691 		 * (and overruled by a SIGKILL).  So those cases clear this
692 		 * shared flag after we've set it.  Note that this flag may
693 		 * remain set after the signal we return is ignored or
694 		 * handled.  That doesn't matter because its only purpose
695 		 * is to alert stop-signal processing code when another
696 		 * processor has come along and cleared the flag.
697 		 */
698 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
699 	}
700 #ifdef CONFIG_POSIX_TIMERS
701 	if (resched_timer) {
702 		/*
703 		 * Release the siglock to ensure proper locking order
704 		 * of timer locks outside of siglocks.  Note, we leave
705 		 * irqs disabled here, since the posix-timers code is
706 		 * about to disable them again anyway.
707 		 */
708 		spin_unlock(&tsk->sighand->siglock);
709 		posixtimer_rearm(info);
710 		spin_lock(&tsk->sighand->siglock);
711 
712 		/* Don't expose the si_sys_private value to userspace */
713 		info->si_sys_private = 0;
714 	}
715 #endif
716 	return signr;
717 }
718 EXPORT_SYMBOL_GPL(dequeue_signal);
719 
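/*
 * Dequeue a pending synchronous (fault-generated) signal, if there is
 * one that is not blocked, so it is handled ahead of asynchronous signals.
 */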
720 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
721 {
722 	struct task_struct *tsk = current;
723 	struct sigpending *pending = &tsk->pending;
724 	struct sigqueue *q, *sync = NULL;
725 
726 	/*
727 	 * Might a synchronous signal be in the queue?
728 	 */
729 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
730 		return 0;
731 
732 	/*
733 	 * Return the first synchronous signal in the queue.
734 	 */
735 	list_for_each_entry(q, &pending->list, list) {
736 		/* Synchronous signals have a positive si_code */
737 		if ((q->info.si_code > SI_USER) &&
738 		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
739 			sync = q;
740 			goto next;
741 		}
742 	}
743 	return 0;
744 next:
745 	/*
746 	 * Check if there is another siginfo for the same signal.
747 	 */
748 	list_for_each_entry_continue(q, &pending->list, list) {
749 		if (q->info.si_signo == sync->info.si_signo)
750 			goto still_pending;
751 	}
752 
753 	sigdelset(&pending->signal, sync->info.si_signo);
754 	recalc_sigpending();
755 still_pending:
756 	list_del_init(&sync->list);
757 	copy_siginfo(info, &sync->info);
758 	__sigqueue_free(sync);
759 	return info->si_signo;
760 }
761 
762 /*
763  * Tell a process that it has a new active signal.
764  *
765  * NOTE! We rely on the previous spin_lock to
766  * lock interrupts for us! We can only be called with
767  * "siglock" held, and local interrupts must
768  * have been disabled when that got acquired!
769  *
770  * No need to set need_resched since signal event passing
771  * goes through ->blocked.
772  */
773 void signal_wake_up_state(struct task_struct *t, unsigned int state)
774 {
775 	set_tsk_thread_flag(t, TIF_SIGPENDING);
776 	/*
777 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
778 	 * case. We don't check t->state here because there is a race with it
779 	 * executing on another processor and just now entering the stopped state.
780 	 * By using wake_up_state, we ensure the process will wake up and
781 	 * handle its death signal.
782 	 */
783 	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
784 		kick_process(t);
785 }
786 
787 /*
788  * Remove signals in mask from the pending set and queue.
789  * The matching queued siginfo entries are freed as well.
790  *
791  * All callers must be holding the siglock.
792  */
793 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
794 {
795 	struct sigqueue *q, *n;
796 	sigset_t m;
797 
798 	sigandsets(&m, mask, &s->signal);
799 	if (sigisemptyset(&m))
800 		return;
801 
802 	sigandnsets(&s->signal, &s->signal, mask);
803 	list_for_each_entry_safe(q, n, &s->list, list) {
804 		if (sigismember(mask, q->info.si_signo)) {
805 			list_del_init(&q->list);
806 			__sigqueue_free(q);
807 		}
808 	}
809 }
810 
811 static inline int is_si_special(const struct kernel_siginfo *info)
812 {
813 	return info <= SEND_SIG_PRIV;
814 }
815 
816 static inline bool si_fromuser(const struct kernel_siginfo *info)
817 {
818 	return info == SEND_SIG_NOINFO ||
819 		(!is_si_special(info) && SI_FROMUSER(info));
820 }
821 
822 /*
823  * called with RCU read lock from check_kill_permission()
824  */
825 static bool kill_ok_by_cred(struct task_struct *t)
826 {
827 	const struct cred *cred = current_cred();
828 	const struct cred *tcred = __task_cred(t);
829 
830 	return uid_eq(cred->euid, tcred->suid) ||
831 	       uid_eq(cred->euid, tcred->uid) ||
832 	       uid_eq(cred->uid, tcred->suid) ||
833 	       uid_eq(cred->uid, tcred->uid) ||
834 	       ns_capable(tcred->user_ns, CAP_KILL);
835 }
836 
837 /*
838  * Bad permissions for sending the signal
839  * - the caller must hold the RCU read lock
840  */
841 static int check_kill_permission(int sig, struct kernel_siginfo *info,
842 				 struct task_struct *t)
843 {
844 	struct pid *sid;
845 	int error;
846 
847 	if (!valid_signal(sig))
848 		return -EINVAL;
849 
850 	if (!si_fromuser(info))
851 		return 0;
852 
853 	error = audit_signal_info(sig, t); /* Let audit system see the signal */
854 	if (error)
855 		return error;
856 
857 	if (!same_thread_group(current, t) &&
858 	    !kill_ok_by_cred(t)) {
859 		switch (sig) {
860 		case SIGCONT:
861 			sid = task_session(t);
862 			/*
863 			 * We don't return the error if sid == NULL. The
864 			 * task was unhashed; the caller must notice this.
865 			 */
866 			if (!sid || sid == task_session(current))
867 				break;
868 			fallthrough;
869 		default:
870 			return -EPERM;
871 		}
872 	}
873 
874 	return security_task_kill(t, info, sig, NULL);
875 }
876 
877 /**
878  * ptrace_trap_notify - schedule trap to notify ptracer
879  * @t: tracee wanting to notify tracer
880  *
881  * This function schedules a sticky ptrace trap which is cleared on the
882  * next TRAP_STOP to notify the ptracer of an event.  @t must have been
883  * seized by the ptracer.
884  *
885  * If @t is running, STOP trap will be taken.  If trapped for STOP and
886  * ptracer is listening for events, tracee is woken up so that it can
887  * re-trap for the new event.  If trapped otherwise, STOP trap will be
888  * eventually taken without returning to userland after the existing traps
889  * are finished by PTRACE_CONT.
890  *
891  * CONTEXT:
892  * Must be called with @t->sighand->siglock held.
893  */
894 static void ptrace_trap_notify(struct task_struct *t)
895 {
896 	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
897 	assert_spin_locked(&t->sighand->siglock);
898 
899 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
900 	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
901 }
902 
903 /*
904  * Handle magic process-wide effects of stop/continue signals. Unlike
905  * the signal actions, these happen immediately at signal-generation
906  * time regardless of blocking, ignoring, or handling.  This does the
907  * actual continuing for SIGCONT, but not the actual stopping for stop
908  * signals. The process stop is done as a signal action for SIG_DFL.
909  *
910  * Returns true if the signal should be actually delivered, otherwise
911  * it should be dropped.
912  */
913 static bool prepare_signal(int sig, struct task_struct *p, bool force)
914 {
915 	struct signal_struct *signal = p->signal;
916 	struct task_struct *t;
917 	sigset_t flush;
918 
919 	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
920 		if (!(signal->flags & SIGNAL_GROUP_EXIT))
921 			return sig == SIGKILL;
922 		/*
923 		 * The process is in the middle of dying, nothing to do.
924 		 */
925 	} else if (sig_kernel_stop(sig)) {
926 		/*
927 		 * This is a stop signal.  Remove SIGCONT from all queues.
928 		 */
929 		siginitset(&flush, sigmask(SIGCONT));
930 		flush_sigqueue_mask(&flush, &signal->shared_pending);
931 		for_each_thread(p, t)
932 			flush_sigqueue_mask(&flush, &t->pending);
933 	} else if (sig == SIGCONT) {
934 		unsigned int why;
935 		/*
936 		 * Remove all stop signals from all queues, wake all threads.
937 		 */
938 		siginitset(&flush, SIG_KERNEL_STOP_MASK);
939 		flush_sigqueue_mask(&flush, &signal->shared_pending);
940 		for_each_thread(p, t) {
941 			flush_sigqueue_mask(&flush, &t->pending);
942 			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
943 			if (likely(!(t->ptrace & PT_SEIZED)))
944 				wake_up_state(t, __TASK_STOPPED);
945 			else
946 				ptrace_trap_notify(t);
947 		}
948 
949 		/*
950 		 * Notify the parent with CLD_CONTINUED if we were stopped.
951 		 *
952 		 * If we were in the middle of a group stop, we pretend it
953 		 * was already finished, and then continued. Since SIGCHLD
954 		 * doesn't queue we report only CLD_STOPPED, as if the next
955 		 * CLD_CONTINUED was dropped.
956 		 */
957 		why = 0;
958 		if (signal->flags & SIGNAL_STOP_STOPPED)
959 			why |= SIGNAL_CLD_CONTINUED;
960 		else if (signal->group_stop_count)
961 			why |= SIGNAL_CLD_STOPPED;
962 
963 		if (why) {
964 			/*
965 			 * The first thread which returns from do_signal_stop()
966 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
967 			 * notify its parent. See get_signal().
968 			 */
969 			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
970 			signal->group_stop_count = 0;
971 			signal->group_exit_code = 0;
972 		}
973 	}
974 
975 	return !sig_ignored(p, sig, force);
976 }
977 
978 /*
979  * Test if P wants to take SIG.  After we've checked all threads with this,
980  * it's equivalent to finding no threads not blocking SIG.  Any threads not
981  * blocking SIG were ruled out because they are not running and already
982  * have pending signals.  Such threads will dequeue from the shared queue
983  * as soon as they're available, so putting the signal on the shared queue
984  * will be equivalent to sending it to one such thread.
985  */
986 static inline bool wants_signal(int sig, struct task_struct *p)
987 {
988 	if (sigismember(&p->blocked, sig))
989 		return false;
990 
991 	if (p->flags & PF_EXITING)
992 		return false;
993 
994 	if (sig == SIGKILL)
995 		return true;
996 
997 	if (task_is_stopped_or_traced(p))
998 		return false;
999 
1000 	return task_curr(p) || !task_sigpending(p);
1001 }
1002 
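/*
 * A signal has just been queued for @p: pick a suitable thread and wake
 * it up to dequeue the signal.  If the signal is fatal to the whole
 * group, start tearing the group down right away.
 */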
1003 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1004 {
1005 	struct signal_struct *signal = p->signal;
1006 	struct task_struct *t;
1007 
1008 	/*
1009 	 * Now find a thread we can wake up to take the signal off the queue.
1010 	 *
1011 	 * If the main thread wants the signal, it gets first crack.
1012 	 * Probably the least surprising to the average bear.
1013 	 */
1014 	if (wants_signal(sig, p))
1015 		t = p;
1016 	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1017 		/*
1018 		 * There is just one thread and it does not need to be woken.
1019 		 * It will dequeue unblocked signals before it runs again.
1020 		 */
1021 		return;
1022 	else {
1023 		/*
1024 		 * Otherwise try to find a suitable thread.
1025 		 */
1026 		t = signal->curr_target;
1027 		while (!wants_signal(sig, t)) {
1028 			t = next_thread(t);
1029 			if (t == signal->curr_target)
1030 				/*
1031 				 * No thread needs to be woken.
1032 				 * Any eligible threads will see
1033 				 * the signal in the queue soon.
1034 				 */
1035 				return;
1036 		}
1037 		signal->curr_target = t;
1038 	}
1039 
1040 	/*
1041 	 * Found a killable thread.  If the signal will be fatal,
1042 	 * then start taking the whole group down immediately.
1043 	 */
1044 	if (sig_fatal(p, sig) &&
1045 	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
1046 	    !sigismember(&t->real_blocked, sig) &&
1047 	    (sig == SIGKILL || !p->ptrace)) {
1048 		/*
1049 		 * This signal will be fatal to the whole group.
1050 		 */
1051 		if (!sig_kernel_coredump(sig)) {
1052 			/*
1053 			 * Start a group exit and wake everybody up.
1054 			 * This way we don't have other threads
1055 			 * running and doing things after a slower
1056 			 * thread has the fatal signal pending.
1057 			 */
1058 			signal->flags = SIGNAL_GROUP_EXIT;
1059 			signal->group_exit_code = sig;
1060 			signal->group_stop_count = 0;
1061 			t = p;
1062 			do {
1063 				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1064 				sigaddset(&t->pending.signal, SIGKILL);
1065 				signal_wake_up(t, 1);
1066 			} while_each_thread(p, t);
1067 			return;
1068 		}
1069 	}
1070 
1071 	/*
1072 	 * The signal is already in the shared-pending queue.
1073 	 * Tell the chosen thread to wake up and dequeue it.
1074 	 */
1075 	signal_wake_up(t, sig == SIGKILL);
1076 	return;
1077 }
1078 
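/*
 * Legacy (non-realtime) signals are never queued more than once; a new
 * instance is dropped while one is already pending.
 */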
1079 static inline bool legacy_queue(struct sigpending *signals, int sig)
1080 {
1081 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1082 }
1083 
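/*
 * Queue @sig for @t on the per-thread or shared pending list, depending
 * on @type, and let complete_signal() pick a thread to handle it.  The
 * caller must hold @t->sighand->siglock.
 */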
1084 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1085 			enum pid_type type, bool force)
1086 {
1087 	struct sigpending *pending;
1088 	struct sigqueue *q;
1089 	int override_rlimit;
1090 	int ret = 0, result;
1091 
1092 	assert_spin_locked(&t->sighand->siglock);
1093 
1094 	result = TRACE_SIGNAL_IGNORED;
1095 	if (!prepare_signal(sig, t, force))
1096 		goto ret;
1097 
1098 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1099 	/*
1100 	 * Short-circuit ignored signals and support queuing
1101 	 * exactly one non-rt signal, so that we can get more
1102 	 * detailed information about the cause of the signal.
1103 	 */
1104 	result = TRACE_SIGNAL_ALREADY_PENDING;
1105 	if (legacy_queue(pending, sig))
1106 		goto ret;
1107 
1108 	result = TRACE_SIGNAL_DELIVERED;
1109 	/*
1110 	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1111 	 */
1112 	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1113 		goto out_set;
1114 
1115 	/*
1116 	 * Real-time signals must be queued if sent by sigqueue, or
1117 	 * some other real-time mechanism.  It is implementation
1118 	 * defined whether kill() does so.  We attempt to do so, on
1119 	 * the principle of least surprise, but since kill is not
1120 	 * allowed to fail with EAGAIN when low on memory we just
1121 	 * make sure at least one signal gets delivered and don't
1122 	 * pass on the info struct.
1123 	 */
1124 	if (sig < SIGRTMIN)
1125 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
1126 	else
1127 		override_rlimit = 0;
1128 
1129 	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1130 
1131 	if (q) {
1132 		list_add_tail(&q->list, &pending->list);
1133 		switch ((unsigned long) info) {
1134 		case (unsigned long) SEND_SIG_NOINFO:
1135 			clear_siginfo(&q->info);
1136 			q->info.si_signo = sig;
1137 			q->info.si_errno = 0;
1138 			q->info.si_code = SI_USER;
1139 			q->info.si_pid = task_tgid_nr_ns(current,
1140 							task_active_pid_ns(t));
1141 			rcu_read_lock();
1142 			q->info.si_uid =
1143 				from_kuid_munged(task_cred_xxx(t, user_ns),
1144 						 current_uid());
1145 			rcu_read_unlock();
1146 			break;
1147 		case (unsigned long) SEND_SIG_PRIV:
1148 			clear_siginfo(&q->info);
1149 			q->info.si_signo = sig;
1150 			q->info.si_errno = 0;
1151 			q->info.si_code = SI_KERNEL;
1152 			q->info.si_pid = 0;
1153 			q->info.si_uid = 0;
1154 			break;
1155 		default:
1156 			copy_siginfo(&q->info, info);
1157 			break;
1158 		}
1159 	} else if (!is_si_special(info) &&
1160 		   sig >= SIGRTMIN && info->si_code != SI_USER) {
1161 		/*
1162 		 * Queue overflow, abort.  We may abort if the
1163 		 * signal was rt and sent by a user using something
1164 		 * other than kill().
1165 		 */
1166 		result = TRACE_SIGNAL_OVERFLOW_FAIL;
1167 		ret = -EAGAIN;
1168 		goto ret;
1169 	} else {
1170 		/*
1171 		 * This is a silent loss of information.  We still
1172 		 * send the signal, but the *info bits are lost.
1173 		 */
1174 		result = TRACE_SIGNAL_LOSE_INFO;
1175 	}
1176 
1177 out_set:
1178 	signalfd_notify(t, sig);
1179 	sigaddset(&pending->signal, sig);
1180 
1181 	/* Let multiprocess signals appear after on-going forks */
1182 	if (type > PIDTYPE_TGID) {
1183 		struct multiprocess_signals *delayed;
1184 		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1185 			sigset_t *signal = &delayed->signal;
1186 			/* Can't queue both a stop and a continue signal */
1187 			if (sig == SIGCONT)
1188 				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1189 			else if (sig_kernel_stop(sig))
1190 				sigdelset(signal, SIGCONT);
1191 			sigaddset(signal, sig);
1192 		}
1193 	}
1194 
1195 	complete_signal(sig, t, type);
1196 ret:
1197 	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1198 	return ret;
1199 }
1200 
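/*
 * Does the siginfo layout used by this signal carry si_pid and si_uid?
 */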
1201 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1202 {
1203 	bool ret = false;
1204 	switch (siginfo_layout(info->si_signo, info->si_code)) {
1205 	case SIL_KILL:
1206 	case SIL_CHLD:
1207 	case SIL_RT:
1208 		ret = true;
1209 		break;
1210 	case SIL_TIMER:
1211 	case SIL_POLL:
1212 	case SIL_FAULT:
1213 	case SIL_FAULT_TRAPNO:
1214 	case SIL_FAULT_MCEERR:
1215 	case SIL_FAULT_BNDERR:
1216 	case SIL_FAULT_PKUERR:
1217 	case SIL_FAULT_PERF_EVENT:
1218 	case SIL_SYS:
1219 		ret = false;
1220 		break;
1221 	}
1222 	return ret;
1223 }
1224 
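/*
 * Fix up si_pid/si_uid for the target's pid and user namespaces and
 * decide whether the signal must be forced, then hand off to __send_signal().
 */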
1225 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1226 			enum pid_type type)
1227 {
1228 	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1229 	bool force = false;
1230 
1231 	if (info == SEND_SIG_NOINFO) {
1232 		/* Force if sent from an ancestor pid namespace */
1233 		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1234 	} else if (info == SEND_SIG_PRIV) {
1235 		/* Don't ignore kernel generated signals */
1236 		force = true;
1237 	} else if (has_si_pid_and_uid(info)) {
1238 		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
1239 		struct user_namespace *t_user_ns;
1240 
1241 		rcu_read_lock();
1242 		t_user_ns = task_cred_xxx(t, user_ns);
1243 		if (current_user_ns() != t_user_ns) {
1244 			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1245 			info->si_uid = from_kuid_munged(t_user_ns, uid);
1246 		}
1247 		rcu_read_unlock();
1248 
1249 		/* A kernel generated signal? */
1250 		force = (info->si_code == SI_KERNEL);
1251 
1252 		/* From an ancestor pid namespace? */
1253 		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1254 			info->si_pid = 0;
1255 			force = true;
1256 		}
1257 	}
1258 	return __send_signal(sig, info, t, type, force);
1259 }
1260 
1261 static void print_fatal_signal(int signr)
1262 {
1263 	struct pt_regs *regs = signal_pt_regs();
1264 	pr_info("potentially unexpected fatal signal %d.\n", signr);
1265 
1266 #if defined(__i386__) && !defined(__arch_um__)
1267 	pr_info("code at %08lx: ", regs->ip);
1268 	{
1269 		int i;
1270 		for (i = 0; i < 16; i++) {
1271 			unsigned char insn;
1272 
1273 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
1274 				break;
1275 			pr_cont("%02x ", insn);
1276 		}
1277 	}
1278 	pr_cont("\n");
1279 #endif
1280 	preempt_disable();
1281 	show_regs(regs);
1282 	preempt_enable();
1283 }
1284 
1285 static int __init setup_print_fatal_signals(char *str)
1286 {
1287 	get_option (&str, &print_fatal_signals);
1288 
1289 	return 1;
1290 }
1291 
1292 __setup("print-fatal-signals=", setup_print_fatal_signals);
1293 
1294 int
1295 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1296 {
1297 	return send_signal(sig, info, p, PIDTYPE_TGID);
1298 }
1299 
1300 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1301 			enum pid_type type)
1302 {
1303 	unsigned long flags;
1304 	int ret = -ESRCH;
1305 
1306 	if (lock_task_sighand(p, &flags)) {
1307 		ret = send_signal(sig, info, p, type);
1308 		unlock_task_sighand(p, &flags);
1309 	}
1310 
1311 	return ret;
1312 }
1313 
1314 /*
1315  * Force a signal that the process can't ignore: if necessary
1316  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1317  *
1318  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1319  * since we do not want a handler that user space had explicitly
1320  * blocked to be invoked once we unblock the signal.
1321  *
1322  * We don't want recursive SIGSEGVs etc., for example;
1323  * that is why we also clear SIGNAL_UNKILLABLE.
1324  */
1325 static int
1326 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
1327 {
1328 	unsigned long int flags;
1329 	int ret, blocked, ignored;
1330 	struct k_sigaction *action;
1331 	int sig = info->si_signo;
1332 
1333 	spin_lock_irqsave(&t->sighand->siglock, flags);
1334 	action = &t->sighand->action[sig-1];
1335 	ignored = action->sa.sa_handler == SIG_IGN;
1336 	blocked = sigismember(&t->blocked, sig);
1337 	if (blocked || ignored || sigdfl) {
1338 		action->sa.sa_handler = SIG_DFL;
1339 		if (blocked) {
1340 			sigdelset(&t->blocked, sig);
1341 			recalc_sigpending_and_wake(t);
1342 		}
1343 	}
1344 	/*
1345 	 * Don't clear SIGNAL_UNKILLABLE for traced tasks; users won't expect
1346 	 * debugging to leave init killable.
1347 	 */
1348 	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1349 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
1350 	ret = send_signal(sig, info, t, PIDTYPE_PID);
1351 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
1352 
1353 	return ret;
1354 }
1355 
1356 int force_sig_info(struct kernel_siginfo *info)
1357 {
1358 	return force_sig_info_to_task(info, current, false);
1359 }
1360 
1361 /*
1362  * Nuke all other threads in the group.
1363  */
1364 int zap_other_threads(struct task_struct *p)
1365 {
1366 	struct task_struct *t = p;
1367 	int count = 0;
1368 
1369 	p->signal->group_stop_count = 0;
1370 
1371 	while_each_thread(p, t) {
1372 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1373 		count++;
1374 
1375 		/* Don't bother with already dead threads */
1376 		if (t->exit_state)
1377 			continue;
1378 		sigaddset(&t->pending.signal, SIGKILL);
1379 		signal_wake_up(t, 1);
1380 	}
1381 
1382 	return count;
1383 }
1384 
1385 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1386 					   unsigned long *flags)
1387 {
1388 	struct sighand_struct *sighand;
1389 
1390 	rcu_read_lock();
1391 	for (;;) {
1392 		sighand = rcu_dereference(tsk->sighand);
1393 		if (unlikely(sighand == NULL))
1394 			break;
1395 
1396 		/*
1397 		 * This sighand can be already freed and even reused, but
1398 		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1399 		 * initializes ->siglock: this slab can't go away, it has
1400 		 * the same object type, ->siglock can't be reinitialized.
1401 		 *
1402 		 * We need to ensure that tsk->sighand is still the same
1403 		 * after we take the lock; we can race with de_thread() or
1404 		 * __exit_signal(). In the latter case the next iteration
1405 		 * must see ->sighand == NULL.
1406 		 */
1407 		spin_lock_irqsave(&sighand->siglock, *flags);
1408 		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1409 			break;
1410 		spin_unlock_irqrestore(&sighand->siglock, *flags);
1411 	}
1412 	rcu_read_unlock();
1413 
1414 	return sighand;
1415 }
1416 
1417 #ifdef CONFIG_LOCKDEP
1418 void lockdep_assert_task_sighand_held(struct task_struct *task)
1419 {
1420 	struct sighand_struct *sighand;
1421 
1422 	rcu_read_lock();
1423 	sighand = rcu_dereference(task->sighand);
1424 	if (sighand)
1425 		lockdep_assert_held(&sighand->siglock);
1426 	else
1427 		WARN_ON_ONCE(1);
1428 	rcu_read_unlock();
1429 }
1430 #endif
1431 
1432 /*
1433  * send signal info to all the members of a group
1434  */
1435 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1436 			struct task_struct *p, enum pid_type type)
1437 {
1438 	int ret;
1439 
1440 	rcu_read_lock();
1441 	ret = check_kill_permission(sig, info, p);
1442 	rcu_read_unlock();
1443 
1444 	if (!ret && sig)
1445 		ret = do_send_sig_info(sig, info, p, type);
1446 
1447 	return ret;
1448 }
1449 
1450 /*
1451  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1452  * control characters do (^C, ^Z etc)
1453  * - the caller must hold at least a readlock on tasklist_lock
1454  */
1455 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1456 {
1457 	struct task_struct *p = NULL;
1458 	int retval, success;
1459 
1460 	success = 0;
1461 	retval = -ESRCH;
1462 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1463 		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1464 		success |= !err;
1465 		retval = err;
1466 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1467 	return success ? 0 : retval;
1468 }
1469 
1470 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1471 {
1472 	int error = -ESRCH;
1473 	struct task_struct *p;
1474 
1475 	for (;;) {
1476 		rcu_read_lock();
1477 		p = pid_task(pid, PIDTYPE_PID);
1478 		if (p)
1479 			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1480 		rcu_read_unlock();
1481 		if (likely(!p || error != -ESRCH))
1482 			return error;
1483 
1484 		/*
1485 		 * The task was unhashed in between, try again.  If it
1486 		 * is dead, pid_task() will return NULL, if we race with
1487 		 * de_thread() it will find the new leader.
1488 		 */
1489 	}
1490 }
1491 
1492 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1493 {
1494 	int error;
1495 	rcu_read_lock();
1496 	error = kill_pid_info(sig, info, find_vpid(pid));
1497 	rcu_read_unlock();
1498 	return error;
1499 }
1500 
1501 static inline bool kill_as_cred_perm(const struct cred *cred,
1502 				     struct task_struct *target)
1503 {
1504 	const struct cred *pcred = __task_cred(target);
1505 
1506 	return uid_eq(cred->euid, pcred->suid) ||
1507 	       uid_eq(cred->euid, pcred->uid) ||
1508 	       uid_eq(cred->uid, pcred->suid) ||
1509 	       uid_eq(cred->uid, pcred->uid);
1510 }
1511 
1512 /*
1513  * The usb asyncio usage of siginfo is wrong.  The glibc support
1514  * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1515  * AKA after the generic fields:
1516  *	kernel_pid_t	si_pid;
1517  *	kernel_uid32_t	si_uid;
1518  *	sigval_t	si_value;
1519  *
1520  * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1521  * after the generic fields is:
1522  *	void __user 	*si_addr;
1523  *
1524  * This is a practical problem when there is a 64bit big endian kernel
1525  * and a 32bit userspace.  The 32bit address will be encoded in the low
1526  * 32bits of the pointer.  Those low 32bits will be stored at a higher
1527  * address than they would appear in a 32 bit pointer.  So userspace
1528  * will not see the address it was expecting for its completions.
1529  *
1530  * There is nothing in the encoding that can allow
1531  * copy_siginfo_to_user32 to detect this confusion of formats, so
1532  * handle this by requiring the caller of kill_pid_usb_asyncio to
1533  * notice when this situation takes place and to store the 32bit
1534  * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
1535  * parameter.
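 *
 * For example, a caller that has detected a 32bit userspace recipient
 * would be expected to do something along these lines (illustrative
 * sketch only; user_ptr32 stands for the 32bit completion address):
 *
 *	sigval_t addr;
 *
 *	addr.sival_int = (int)(unsigned long)user_ptr32;
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);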
1536  */
1537 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1538 			 struct pid *pid, const struct cred *cred)
1539 {
1540 	struct kernel_siginfo info;
1541 	struct task_struct *p;
1542 	unsigned long flags;
1543 	int ret = -EINVAL;
1544 
1545 	if (!valid_signal(sig))
1546 		return ret;
1547 
1548 	clear_siginfo(&info);
1549 	info.si_signo = sig;
1550 	info.si_errno = errno;
1551 	info.si_code = SI_ASYNCIO;
1552 	*((sigval_t *)&info.si_pid) = addr;
1553 
1554 	rcu_read_lock();
1555 	p = pid_task(pid, PIDTYPE_PID);
1556 	if (!p) {
1557 		ret = -ESRCH;
1558 		goto out_unlock;
1559 	}
1560 	if (!kill_as_cred_perm(cred, p)) {
1561 		ret = -EPERM;
1562 		goto out_unlock;
1563 	}
1564 	ret = security_task_kill(p, &info, sig, cred);
1565 	if (ret)
1566 		goto out_unlock;
1567 
1568 	if (sig) {
1569 		if (lock_task_sighand(p, &flags)) {
1570 			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1571 			unlock_task_sighand(p, &flags);
1572 		} else
1573 			ret = -ESRCH;
1574 	}
1575 out_unlock:
1576 	rcu_read_unlock();
1577 	return ret;
1578 }
1579 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1580 
1581 /*
1582  * kill_something_info() interprets pid in interesting ways just like kill(2).
1583  *
1584  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1585  * is probably wrong.  Should make it like BSD or SYSV.
1586  */
1587 
1588 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1589 {
1590 	int ret;
1591 
1592 	if (pid > 0)
1593 		return kill_proc_info(sig, info, pid);
1594 
1595 	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1596 	if (pid == INT_MIN)
1597 		return -ESRCH;
1598 
1599 	read_lock(&tasklist_lock);
1600 	if (pid != -1) {
1601 		ret = __kill_pgrp_info(sig, info,
1602 				pid ? find_vpid(-pid) : task_pgrp(current));
1603 	} else {
1604 		int retval = 0, count = 0;
1605 		struct task_struct * p;
1606 
1607 		for_each_process(p) {
1608 			if (task_pid_vnr(p) > 1 &&
1609 					!same_thread_group(p, current)) {
1610 				int err = group_send_sig_info(sig, info, p,
1611 							      PIDTYPE_MAX);
1612 				++count;
1613 				if (err != -EPERM)
1614 					retval = err;
1615 			}
1616 		}
1617 		ret = count ? retval : -ESRCH;
1618 	}
1619 	read_unlock(&tasklist_lock);
1620 
1621 	return ret;
1622 }
1623 
1624 /*
1625  * These are for backward compatibility with the rest of the kernel source.
1626  */
1627 
1628 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1629 {
1630 	/*
1631 	 * Make sure legacy kernel users don't send in bad values
1632 	 * (normal paths check this in check_kill_permission).
1633 	 */
1634 	if (!valid_signal(sig))
1635 		return -EINVAL;
1636 
1637 	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1638 }
1639 EXPORT_SYMBOL(send_sig_info);
1640 
1641 #define __si_special(priv) \
1642 	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1643 
1644 int
1645 send_sig(int sig, struct task_struct *p, int priv)
1646 {
1647 	return send_sig_info(sig, __si_special(priv), p);
1648 }
1649 EXPORT_SYMBOL(send_sig);
1650 
1651 void force_sig(int sig)
1652 {
1653 	struct kernel_siginfo info;
1654 
1655 	clear_siginfo(&info);
1656 	info.si_signo = sig;
1657 	info.si_errno = 0;
1658 	info.si_code = SI_KERNEL;
1659 	info.si_pid = 0;
1660 	info.si_uid = 0;
1661 	force_sig_info(&info);
1662 }
1663 EXPORT_SYMBOL(force_sig);
1664 
1665 /*
1666  * When things go south during signal handling, we
1667  * will force a SIGSEGV. And if the signal that caused
1668  * the problem was already a SIGSEGV, we'll want to
1669  * make sure we don't even try to deliver the signal.
1670  */
1671 void force_sigsegv(int sig)
1672 {
1673 	struct task_struct *p = current;
1674 
1675 	if (sig == SIGSEGV) {
1676 		unsigned long flags;
1677 		spin_lock_irqsave(&p->sighand->siglock, flags);
1678 		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1679 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
1680 	}
1681 	force_sig(SIGSEGV);
1682 }
1683 
1684 int force_sig_fault_to_task(int sig, int code, void __user *addr
1685 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1686 	, struct task_struct *t)
1687 {
1688 	struct kernel_siginfo info;
1689 
1690 	clear_siginfo(&info);
1691 	info.si_signo = sig;
1692 	info.si_errno = 0;
1693 	info.si_code  = code;
1694 	info.si_addr  = addr;
1695 #ifdef __ia64__
1696 	info.si_imm = imm;
1697 	info.si_flags = flags;
1698 	info.si_isr = isr;
1699 #endif
1700 	return force_sig_info_to_task(&info, t, false);
1701 }
1702 
1703 int force_sig_fault(int sig, int code, void __user *addr
1704 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1705 {
1706 	return force_sig_fault_to_task(sig, code, addr
1707 				       ___ARCH_SI_IA64(imm, flags, isr), current);
1708 }
1709 
1710 int send_sig_fault(int sig, int code, void __user *addr
1711 	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1712 	, struct task_struct *t)
1713 {
1714 	struct kernel_siginfo info;
1715 
1716 	clear_siginfo(&info);
1717 	info.si_signo = sig;
1718 	info.si_errno = 0;
1719 	info.si_code  = code;
1720 	info.si_addr  = addr;
1721 #ifdef __ia64__
1722 	info.si_imm = imm;
1723 	info.si_flags = flags;
1724 	info.si_isr = isr;
1725 #endif
1726 	return send_sig_info(info.si_signo, &info, t);
1727 }
1728 
1729 int force_sig_mceerr(int code, void __user *addr, short lsb)
1730 {
1731 	struct kernel_siginfo info;
1732 
1733 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1734 	clear_siginfo(&info);
1735 	info.si_signo = SIGBUS;
1736 	info.si_errno = 0;
1737 	info.si_code = code;
1738 	info.si_addr = addr;
1739 	info.si_addr_lsb = lsb;
1740 	return force_sig_info(&info);
1741 }
1742 
1743 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1744 {
1745 	struct kernel_siginfo info;
1746 
1747 	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1748 	clear_siginfo(&info);
1749 	info.si_signo = SIGBUS;
1750 	info.si_errno = 0;
1751 	info.si_code = code;
1752 	info.si_addr = addr;
1753 	info.si_addr_lsb = lsb;
1754 	return send_sig_info(info.si_signo, &info, t);
1755 }
1756 EXPORT_SYMBOL(send_sig_mceerr);
1757 
1758 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1759 {
1760 	struct kernel_siginfo info;
1761 
1762 	clear_siginfo(&info);
1763 	info.si_signo = SIGSEGV;
1764 	info.si_errno = 0;
1765 	info.si_code  = SEGV_BNDERR;
1766 	info.si_addr  = addr;
1767 	info.si_lower = lower;
1768 	info.si_upper = upper;
1769 	return force_sig_info(&info);
1770 }
1771 
1772 #ifdef SEGV_PKUERR
1773 int force_sig_pkuerr(void __user *addr, u32 pkey)
1774 {
1775 	struct kernel_siginfo info;
1776 
1777 	clear_siginfo(&info);
1778 	info.si_signo = SIGSEGV;
1779 	info.si_errno = 0;
1780 	info.si_code  = SEGV_PKUERR;
1781 	info.si_addr  = addr;
1782 	info.si_pkey  = pkey;
1783 	return force_sig_info(&info);
1784 }
1785 #endif
1786 
1787 int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1788 {
1789 	struct kernel_siginfo info;
1790 
1791 	clear_siginfo(&info);
1792 	info.si_signo     = SIGTRAP;
1793 	info.si_errno     = 0;
1794 	info.si_code      = TRAP_PERF;
1795 	info.si_addr      = addr;
1796 	info.si_perf_data = sig_data;
1797 	info.si_perf_type = type;
1798 
1799 	return force_sig_info(&info);
1800 }
1801 
1802 /**
1803  * force_sig_seccomp - signals the task to allow in-process syscall emulation
1804  * @syscall: syscall number to send to userland
1805  * @reason: filter-supplied reason code to send to userland (via si_errno)
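 * @force_coredump: reset the handler to SIG_DFL so the default SIGSYS
 *	action (including a possible core dump) is taken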
1806  *
1807  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1808  */
1809 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1810 {
1811 	struct kernel_siginfo info;
1812 
1813 	clear_siginfo(&info);
1814 	info.si_signo = SIGSYS;
1815 	info.si_code = SYS_SECCOMP;
1816 	info.si_call_addr = (void __user *)KSTK_EIP(current);
1817 	info.si_errno = reason;
1818 	info.si_arch = syscall_get_arch(current);
1819 	info.si_syscall = syscall;
1820 	return force_sig_info_to_task(&info, current, force_coredump);
1821 }
1822 
1823 /* For the crazy architectures that include trap information in
1824  * the errno field, instead of an actual errno value.
1825  */
1826 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1827 {
1828 	struct kernel_siginfo info;
1829 
1830 	clear_siginfo(&info);
1831 	info.si_signo = SIGTRAP;
1832 	info.si_errno = errno;
1833 	info.si_code  = TRAP_HWBKPT;
1834 	info.si_addr  = addr;
1835 	return force_sig_info(&info);
1836 }
1837 
1838 /* For the rare architectures that include trap information using
1839  * si_trapno.
1840  */
1841 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1842 {
1843 	struct kernel_siginfo info;
1844 
1845 	clear_siginfo(&info);
1846 	info.si_signo = sig;
1847 	info.si_errno = 0;
1848 	info.si_code  = code;
1849 	info.si_addr  = addr;
1850 	info.si_trapno = trapno;
1851 	return force_sig_info(&info);
1852 }
1853 
1854 /* For the rare architectures that include trap information using
1855  * si_trapno.
1856  */
1857 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1858 			  struct task_struct *t)
1859 {
1860 	struct kernel_siginfo info;
1861 
1862 	clear_siginfo(&info);
1863 	info.si_signo = sig;
1864 	info.si_errno = 0;
1865 	info.si_code  = code;
1866 	info.si_addr  = addr;
1867 	info.si_trapno = trapno;
1868 	return send_sig_info(info.si_signo, &info, t);
1869 }
1870 
1871 int kill_pgrp(struct pid *pid, int sig, int priv)
1872 {
1873 	int ret;
1874 
1875 	read_lock(&tasklist_lock);
1876 	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1877 	read_unlock(&tasklist_lock);
1878 
1879 	return ret;
1880 }
1881 EXPORT_SYMBOL(kill_pgrp);
1882 
1883 int kill_pid(struct pid *pid, int sig, int priv)
1884 {
1885 	return kill_pid_info(sig, __si_special(priv), pid);
1886 }
1887 EXPORT_SYMBOL(kill_pid);
1888 
1889 /*
1890  * These functions support sending signals using preallocated sigqueue
1891  * structures.  This is needed "because realtime applications cannot
1892  * afford to lose notifications of asynchronous events, like timer
1893  * expirations or I/O completions".  In the case of POSIX timers
1894  * we allocate the sigqueue structure at timer_create() time.  If this
1895  * allocation fails, we can report the failure to the application
1896  * with an EAGAIN error.
1897  */
1898 struct sigqueue *sigqueue_alloc(void)
1899 {
1900 	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1901 }
1902 
1903 void sigqueue_free(struct sigqueue *q)
1904 {
1905 	unsigned long flags;
1906 	spinlock_t *lock = &current->sighand->siglock;
1907 
1908 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1909 	/*
1910 	 * We must hold ->siglock while testing q->list
1911 	 * to serialize with collect_signal() or with
1912 	 * __exit_signal()->flush_sigqueue().
1913 	 */
1914 	spin_lock_irqsave(lock, flags);
1915 	q->flags &= ~SIGQUEUE_PREALLOC;
1916 	/*
1917 	 * If it is queued it will be freed when dequeued,
1918 	 * like the "regular" sigqueue.
1919 	 */
1920 	if (!list_empty(&q->list))
1921 		q = NULL;
1922 	spin_unlock_irqrestore(lock, flags);
1923 
1924 	if (q)
1925 		__sigqueue_free(q);
1926 }
1927 
1928 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1929 {
1930 	int sig = q->info.si_signo;
1931 	struct sigpending *pending;
1932 	struct task_struct *t;
1933 	unsigned long flags;
1934 	int ret, result;
1935 
1936 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1937 
1938 	ret = -1;
1939 	rcu_read_lock();
1940 	t = pid_task(pid, type);
1941 	if (!t || !likely(lock_task_sighand(t, &flags)))
1942 		goto ret;
1943 
1944 	ret = 1; /* the signal is ignored */
1945 	result = TRACE_SIGNAL_IGNORED;
1946 	if (!prepare_signal(sig, t, false))
1947 		goto out;
1948 
1949 	ret = 0;
1950 	if (unlikely(!list_empty(&q->list))) {
1951 		/*
1952 		 * If an SI_TIMER entry is already queued, just increment
1953 		 * the overrun count.
1954 		 */
1955 		BUG_ON(q->info.si_code != SI_TIMER);
1956 		q->info.si_overrun++;
1957 		result = TRACE_SIGNAL_ALREADY_PENDING;
1958 		goto out;
1959 	}
1960 	q->info.si_overrun = 0;
1961 
1962 	signalfd_notify(t, sig);
1963 	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1964 	list_add_tail(&q->list, &pending->list);
1965 	sigaddset(&pending->signal, sig);
1966 	complete_signal(sig, t, type);
1967 	result = TRACE_SIGNAL_DELIVERED;
1968 out:
1969 	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1970 	unlock_task_sighand(t, &flags);
1971 ret:
1972 	rcu_read_unlock();
1973 	return ret;
1974 }
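
/*
 * A minimal usage sketch for the preallocated-sigqueue API above, modeled
 * on the POSIX timer code (kernel/time/posix-timers.c); the timer structure
 * and error handling are illustrative only.
 *
 * At timer creation, preallocate the entry so that later delivery cannot
 * fail for lack of memory:
 *
 *	new_timer->sigq = sigqueue_alloc();
 *	if (!new_timer->sigq)
 *		return -EAGAIN;
 *	new_timer->sigq->info.si_code  = SI_TIMER;
 *	new_timer->sigq->info.si_signo = sig;
 *
 * At timer expiry, queue the preallocated entry (0 = queued, 1 = ignored,
 * < 0 = target is gone):
 *
 *	ret = send_sigqueue(new_timer->sigq, pid, PIDTYPE_TGID);
 *
 * At timer deletion, release the preallocated entry:
 *
 *	sigqueue_free(new_timer->sigq);
 */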
1975 
1976 static void do_notify_pidfd(struct task_struct *task)
1977 {
1978 	struct pid *pid;
1979 
1980 	WARN_ON(task->exit_state == 0);
1981 	pid = task_pid(task);
1982 	wake_up_all(&pid->wait_pidfd);
1983 }
1984 
1985 /*
1986  * Let a parent know about the death of a child.
1987  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1988  *
1989  * Returns true if our parent ignored us and so we've switched to
1990  * self-reaping.
1991  */
1992 bool do_notify_parent(struct task_struct *tsk, int sig)
1993 {
1994 	struct kernel_siginfo info;
1995 	unsigned long flags;
1996 	struct sighand_struct *psig;
1997 	bool autoreap = false;
1998 	u64 utime, stime;
1999 
2000 	BUG_ON(sig == -1);
2001 
2002 	/* do_notify_parent_cldstop should have been called instead.  */
2003 	BUG_ON(task_is_stopped_or_traced(tsk));
2004 
2005 	BUG_ON(!tsk->ptrace &&
2006 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2007 
2008 	/* Wake up all pidfd waiters */
2009 	do_notify_pidfd(tsk);
2010 
2011 	if (sig != SIGCHLD) {
2012 		/*
2013 		 * This is only possible if parent == real_parent.
2014 		 * Check if it has changed security domain.
2015 		 */
2016 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2017 			sig = SIGCHLD;
2018 	}
2019 
2020 	clear_siginfo(&info);
2021 	info.si_signo = sig;
2022 	info.si_errno = 0;
2023 	/*
2024 	 * We are under tasklist_lock here so our parent is tied to
2025 	 * us and cannot change.
2026 	 *
2027 	 * task_active_pid_ns will always return the same pid namespace
2028 	 * until a task passes through release_task.
2029 	 *
2030 	 * write_lock() currently calls preempt_disable() which is the
2031 	 * same as rcu_read_lock(), but according to Oleg, it is not
2032 	 * correct to rely on this.
2033 	 */
2034 	rcu_read_lock();
2035 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2036 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2037 				       task_uid(tsk));
2038 	rcu_read_unlock();
2039 
2040 	task_cputime(tsk, &utime, &stime);
2041 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2042 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2043 
2044 	info.si_status = tsk->exit_code & 0x7f;
2045 	if (tsk->exit_code & 0x80)
2046 		info.si_code = CLD_DUMPED;
2047 	else if (tsk->exit_code & 0x7f)
2048 		info.si_code = CLD_KILLED;
2049 	else {
2050 		info.si_code = CLD_EXITED;
2051 		info.si_status = tsk->exit_code >> 8;
2052 	}
2053 
2054 	psig = tsk->parent->sighand;
2055 	spin_lock_irqsave(&psig->siglock, flags);
2056 	if (!tsk->ptrace && sig == SIGCHLD &&
2057 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2058 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2059 		/*
2060 		 * We are exiting and our parent doesn't care.  POSIX.1
2061 		 * defines special semantics for setting SIGCHLD to SIG_IGN
2062 		 * or setting the SA_NOCLDWAIT flag: we should be reaped
2063 		 * automatically and not left for our parent's wait4 call.
2064 		 * Rather than having the parent do it as a magic kind of
2065 		 * signal handler, we just set this to tell do_exit that we
2066 		 * can be cleaned up without becoming a zombie.  Note that
2067 		 * we still call __wake_up_parent in this case, because a
2068 		 * blocked sys_wait4 might now return -ECHILD.
2069 		 *
2070 		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2071 		 * is implementation-defined: we do (if you don't want
2072 		 * it, just use SIG_IGN instead).
2073 		 */
2074 		autoreap = true;
2075 		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2076 			sig = 0;
2077 	}
2078 	/*
2079 	 * Send with __send_signal as si_pid and si_uid are in the
2080 	 * parent's namespaces.
2081 	 */
2082 	if (valid_signal(sig) && sig)
2083 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2084 	__wake_up_parent(tsk, tsk->parent);
2085 	spin_unlock_irqrestore(&psig->siglock, flags);
2086 
2087 	return autoreap;
2088 }
2089 
2090 /**
2091  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2092  * @tsk: task reporting the state change
2093  * @for_ptracer: the notification is for ptracer
2094  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2095  *
2096  * Notify @tsk's parent that the stopped/continued state has changed.  If
2097  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2098  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2099  *
2100  * CONTEXT:
2101  * Must be called with tasklist_lock at least read locked.
2102  */
2103 static void do_notify_parent_cldstop(struct task_struct *tsk,
2104 				     bool for_ptracer, int why)
2105 {
2106 	struct kernel_siginfo info;
2107 	unsigned long flags;
2108 	struct task_struct *parent;
2109 	struct sighand_struct *sighand;
2110 	u64 utime, stime;
2111 
2112 	if (for_ptracer) {
2113 		parent = tsk->parent;
2114 	} else {
2115 		tsk = tsk->group_leader;
2116 		parent = tsk->real_parent;
2117 	}
2118 
2119 	clear_siginfo(&info);
2120 	info.si_signo = SIGCHLD;
2121 	info.si_errno = 0;
2122 	/*
2123 	 * see comment in do_notify_parent() about the following 4 lines
2124 	 */
2125 	rcu_read_lock();
2126 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2127 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2128 	rcu_read_unlock();
2129 
2130 	task_cputime(tsk, &utime, &stime);
2131 	info.si_utime = nsec_to_clock_t(utime);
2132 	info.si_stime = nsec_to_clock_t(stime);
2133 
2134 	info.si_code = why;
2135 	switch (why) {
2136 	case CLD_CONTINUED:
2137 		info.si_status = SIGCONT;
2138 		break;
2139 	case CLD_STOPPED:
2140 		info.si_status = tsk->signal->group_exit_code & 0x7f;
2141 		break;
2142 	case CLD_TRAPPED:
2143 		info.si_status = tsk->exit_code & 0x7f;
2144 		break;
2145 	default:
2146 		BUG();
2147 	}
2148 
2149 	sighand = parent->sighand;
2150 	spin_lock_irqsave(&sighand->siglock, flags);
2151 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2152 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2153 		__group_send_sig_info(SIGCHLD, &info, parent);
2154 	/*
2155 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2156 	 */
2157 	__wake_up_parent(tsk, parent);
2158 	spin_unlock_irqrestore(&sighand->siglock, flags);
2159 }
2160 
2161 static inline bool may_ptrace_stop(void)
2162 {
2163 	if (!likely(current->ptrace))
2164 		return false;
2165 	/*
2166 	 * Are we in the middle of do_coredump?
2167 	 * If so, and our tracer is also part of the coredump, then stopping
2168 	 * would be a deadlock situation and is pointless anyway because our
2169 	 * tracer is dead, so don't allow us to stop.
2170 	 * If SIGKILL was already sent before the caller unlocked
2171 	 * ->siglock we must see ->core_state != NULL. Otherwise it
2172 	 * is safe to enter schedule().
2173 	 *
2174 	 * This is almost outdated: a task with a pending SIGKILL can't
2175 	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2176 	 * after SIGKILL was already dequeued.
2177 	 */
2178 	if (unlikely(current->mm->core_state) &&
2179 	    unlikely(current->mm == current->parent->mm))
2180 		return false;
2181 
2182 	return true;
2183 }
2184 
2185 /*
2186  * Return non-zero if there is a SIGKILL that should be waking us up.
2187  * Called with the siglock held.
2188  */
2189 static bool sigkill_pending(struct task_struct *tsk)
2190 {
2191 	return sigismember(&tsk->pending.signal, SIGKILL) ||
2192 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2193 }
2194 
2195 /*
2196  * This must be called with current->sighand->siglock held.
2197  *
2198  * This should be the path for all ptrace stops.
2199  * We always set current->last_siginfo while stopped here.
2200  * That makes it a way to test a stopped process for
2201  * being ptrace-stopped vs being job-control-stopped.
2202  *
2203  * If we actually decide not to stop at all because the tracer
2204  * is gone, we keep current->exit_code unless clear_code.
2205  */
2206 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2207 	__releases(&current->sighand->siglock)
2208 	__acquires(&current->sighand->siglock)
2209 {
2210 	bool gstop_done = false;
2211 
2212 	if (arch_ptrace_stop_needed(exit_code, info)) {
2213 		/*
2214 		 * The arch code has something special to do before a
2215 		 * ptrace stop.  This is allowed to block, e.g. for faults
2216 		 * on user stack pages.  We can't keep the siglock while
2217 		 * calling arch_ptrace_stop, so we must release it now.
2218 		 * To preserve proper semantics, we must do this before
2219 		 * any signal bookkeeping like checking group_stop_count.
2220 		 * Meanwhile, a SIGKILL could come in before we retake the
2221 		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
2222 		 * So after regaining the lock, we must check for SIGKILL.
2223 		 */
2224 		spin_unlock_irq(&current->sighand->siglock);
2225 		arch_ptrace_stop(exit_code, info);
2226 		spin_lock_irq(&current->sighand->siglock);
2227 		if (sigkill_pending(current))
2228 			return;
2229 	}
2230 
2231 	set_special_state(TASK_TRACED);
2232 
2233 	/*
2234 	 * We're committing to trapping.  TRACED should be visible before
2235 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2236 	 * Also, transition to TRACED and updates to ->jobctl should be
2237 	 * atomic with respect to siglock and should be done after the arch
2238 	 * hook as siglock is released and regrabbed across it.
2239 	 *
2240 	 *     TRACER				    TRACEE
2241 	 *
2242 	 *     ptrace_attach()
2243 	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
2244 	 *     do_wait()
2245 	 *       set_current_state()                smp_wmb();
2246 	 *       ptrace_do_wait()
2247 	 *         wait_task_stopped()
2248 	 *           task_stopped_code()
2249 	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2250 	 */
2251 	smp_wmb();
2252 
2253 	current->last_siginfo = info;
2254 	current->exit_code = exit_code;
2255 
2256 	/*
2257 	 * If @why is CLD_STOPPED, we're trapping to participate in a group
2258 	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
2259 	 * across siglock relocks since INTERRUPT was scheduled, PENDING
2260 	 * could be clear now.  We act as if SIGCONT is received after
2261 	 * TASK_TRACED is entered - ignore it.
2262 	 */
2263 	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2264 		gstop_done = task_participate_group_stop(current);
2265 
2266 	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2267 	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2268 	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2269 		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2270 
2271 	/* entering a trap, clear TRAPPING */
2272 	task_clear_jobctl_trapping(current);
2273 
2274 	spin_unlock_irq(&current->sighand->siglock);
2275 	read_lock(&tasklist_lock);
2276 	if (may_ptrace_stop()) {
2277 		/*
2278 		 * Notify parents of the stop.
2279 		 *
2280 		 * While ptraced, there are two parents - the ptracer and
2281 		 * the real_parent of the group_leader.  The ptracer should
2282 		 * know about every stop while the real parent is only
2283 		 * interested in the completion of group stop.  The states
2284 		 * for the two don't interact with each other.  Notify
2285 		 * separately unless they're gonna be duplicates.
2286 		 */
2287 		do_notify_parent_cldstop(current, true, why);
2288 		if (gstop_done && ptrace_reparented(current))
2289 			do_notify_parent_cldstop(current, false, why);
2290 
2291 		/*
2292 		 * Don't want to allow preemption here, because
2293 		 * sys_ptrace() needs this task to be inactive.
2294 		 *
2295 		 * XXX: implement read_unlock_no_resched().
2296 		 */
2297 		preempt_disable();
2298 		read_unlock(&tasklist_lock);
2299 		cgroup_enter_frozen();
2300 		preempt_enable_no_resched();
2301 		freezable_schedule();
2302 		cgroup_leave_frozen(true);
2303 	} else {
2304 		/*
2305 		 * By the time we got the lock, our tracer went away.
2306 		 * Don't drop the lock yet, another tracer may come.
2307 		 *
2308 		 * If @gstop_done, the ptracer went away between group stop
2309 		 * completion and here.  During detach, it would have set
2310 		 * JOBCTL_STOP_PENDING on us and we'll re-enter
2311 		 * TASK_STOPPED in do_signal_stop() on return, so notifying
2312 		 * the real parent of the group stop completion is enough.
2313 		 */
2314 		if (gstop_done)
2315 			do_notify_parent_cldstop(current, false, why);
2316 
2317 		/* tasklist protects us from ptrace_freeze_traced() */
2318 		__set_current_state(TASK_RUNNING);
2319 		if (clear_code)
2320 			current->exit_code = 0;
2321 		read_unlock(&tasklist_lock);
2322 	}
2323 
2324 	/*
2325 	 * We are back.  Now reacquire the siglock before touching
2326 	 * last_siginfo, so that we are sure to have synchronized with
2327 	 * any signal-sending on another CPU that wants to examine it.
2328 	 */
2329 	spin_lock_irq(&current->sighand->siglock);
2330 	current->last_siginfo = NULL;
2331 
2332 	/* LISTENING can be set only during STOP traps, clear it */
2333 	current->jobctl &= ~JOBCTL_LISTENING;
2334 
2335 	/*
2336 	 * Queued signals ignored us while we were stopped for tracing.
2337 	 * So check for any that we should take before resuming user mode.
2338 	 * This sets TIF_SIGPENDING, but never clears it.
2339 	 */
2340 	recalc_sigpending_tsk(current);
2341 }
2342 
2343 static void ptrace_do_notify(int signr, int exit_code, int why)
2344 {
2345 	kernel_siginfo_t info;
2346 
2347 	clear_siginfo(&info);
2348 	info.si_signo = signr;
2349 	info.si_code = exit_code;
2350 	info.si_pid = task_pid_vnr(current);
2351 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2352 
2353 	/* Let the debugger run.  */
2354 	ptrace_stop(exit_code, why, 1, &info);
2355 }
2356 
2357 void ptrace_notify(int exit_code)
2358 {
2359 	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2360 	if (unlikely(current->task_works))
2361 		task_work_run();
2362 
2363 	spin_lock_irq(&current->sighand->siglock);
2364 	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2365 	spin_unlock_irq(&current->sighand->siglock);
2366 }
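
/*
 * The exit_code passed to ptrace_notify() must be SIGTRAP with the ptrace
 * event encoded in bits 8-15, which is what the BUG_ON() above enforces.
 * For example, ptrace_event() in <linux/ptrace.h> reports an event as:
 *
 *	ptrace_notify((event << 8) | SIGTRAP);
 */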
2367 
2368 /**
2369  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2370  * @signr: signr causing group stop if initiating
2371  *
2372  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2373  * and participate in it.  If already set, participate in the existing
2374  * group stop.  If participated in a group stop (and thus slept), %true is
2375  * returned with siglock released.
2376  *
2377  * If ptraced, this function doesn't handle stop itself.  Instead,
2378  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2379  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2380  * places afterwards.
2381  *
2382  * CONTEXT:
2383  * Must be called with @current->sighand->siglock held, which is released
2384  * on %true return.
2385  *
2386  * RETURNS:
2387  * %false if group stop is already cancelled or ptrace trap is scheduled.
2388  * %true if participated in group stop.
2389  */
2390 static bool do_signal_stop(int signr)
2391 	__releases(&current->sighand->siglock)
2392 {
2393 	struct signal_struct *sig = current->signal;
2394 
2395 	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2396 		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2397 		struct task_struct *t;
2398 
2399 		/* signr will be recorded in task->jobctl for retries */
2400 		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2401 
2402 		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2403 		    unlikely(signal_group_exit(sig)))
2404 			return false;
2405 		/*
2406 		 * There is no group stop already in progress.  We must
2407 		 * initiate one now.
2408 		 *
2409 		 * While ptraced, a task may be resumed while group stop is
2410 		 * still in effect and then receive a stop signal and
2411 		 * initiate another group stop.  This deviates from the
2412 		 * usual behavior as two consecutive stop signals can't
2413 		 * cause two group stops when !ptraced.  That is why we
2414 		 * also check !task_is_stopped(t) below.
2415 		 *
2416 		 * The condition can be distinguished by testing whether
2417 		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
2418 		 * group_exit_code in such case.
2419 		 *
2420 		 * This is not necessary for SIGNAL_STOP_CONTINUED because
2421 		 * an intervening stop signal is required to cause two
2422 		 * continued events regardless of ptrace.
2423 		 */
2424 		if (!(sig->flags & SIGNAL_STOP_STOPPED))
2425 			sig->group_exit_code = signr;
2426 
2427 		sig->group_stop_count = 0;
2428 
2429 		if (task_set_jobctl_pending(current, signr | gstop))
2430 			sig->group_stop_count++;
2431 
2432 		t = current;
2433 		while_each_thread(current, t) {
2434 			/*
2435 			 * Setting state to TASK_STOPPED for a group
2436 			 * stop is always done with the siglock held,
2437 			 * so this check has no races.
2438 			 */
2439 			if (!task_is_stopped(t) &&
2440 			    task_set_jobctl_pending(t, signr | gstop)) {
2441 				sig->group_stop_count++;
2442 				if (likely(!(t->ptrace & PT_SEIZED)))
2443 					signal_wake_up(t, 0);
2444 				else
2445 					ptrace_trap_notify(t);
2446 			}
2447 		}
2448 	}
2449 
2450 	if (likely(!current->ptrace)) {
2451 		int notify = 0;
2452 
2453 		/*
2454 		 * If there are no other threads in the group, or if there
2455 		 * is a group stop in progress and we are the last to stop,
2456 		 * report to the parent.
2457 		 */
2458 		if (task_participate_group_stop(current))
2459 			notify = CLD_STOPPED;
2460 
2461 		set_special_state(TASK_STOPPED);
2462 		spin_unlock_irq(&current->sighand->siglock);
2463 
2464 		/*
2465 		 * Notify the parent of the group stop completion.  Because
2466 		 * we're not holding either the siglock or tasklist_lock
2467 		 * here, a ptracer may attach in between; however, this is for
2468 		 * group stop and should always be delivered to the real
2469 		 * parent of the group leader.  The new ptracer will get
2470 		 * its notification when this task transitions into
2471 		 * TASK_TRACED.
2472 		 */
2473 		if (notify) {
2474 			read_lock(&tasklist_lock);
2475 			do_notify_parent_cldstop(current, false, notify);
2476 			read_unlock(&tasklist_lock);
2477 		}
2478 
2479 		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2480 		cgroup_enter_frozen();
2481 		freezable_schedule();
2482 		return true;
2483 	} else {
2484 		/*
2485 		 * While ptraced, group stop is handled by STOP trap.
2486 		 * Schedule it and let the caller deal with it.
2487 		 */
2488 		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2489 		return false;
2490 	}
2491 }
2492 
2493 /**
2494  * do_jobctl_trap - take care of ptrace jobctl traps
2495  *
2496  * When PT_SEIZED, it's used for both group stop and explicit
2497  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2498  * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
2499  * the stop signal; otherwise, %SIGTRAP.
2500  *
2501  * When !PT_SEIZED, it's used only for group stop trap with stop signal
2502  * number as exit_code and no siginfo.
2503  *
2504  * CONTEXT:
2505  * Must be called with @current->sighand->siglock held, which may be
2506  * released and re-acquired before returning with intervening sleep.
2507  */
2508 static void do_jobctl_trap(void)
2509 {
2510 	struct signal_struct *signal = current->signal;
2511 	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2512 
2513 	if (current->ptrace & PT_SEIZED) {
2514 		if (!signal->group_stop_count &&
2515 		    !(signal->flags & SIGNAL_STOP_STOPPED))
2516 			signr = SIGTRAP;
2517 		WARN_ON_ONCE(!signr);
2518 		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2519 				 CLD_STOPPED);
2520 	} else {
2521 		WARN_ON_ONCE(!signr);
2522 		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2523 		current->exit_code = 0;
2524 	}
2525 }
2526 
2527 /**
2528  * do_freezer_trap - handle the freezer jobctl trap
2529  *
2530  * Puts the task into the frozen state, unless the task is about to quit.
2531  * In that case it drops JOBCTL_TRAP_FREEZE.
2532  *
2533  * CONTEXT:
2534  * Must be called with @current->sighand->siglock held,
2535  * which is always released before returning.
2536  */
2537 static void do_freezer_trap(void)
2538 	__releases(&current->sighand->siglock)
2539 {
2540 	/*
2541 	 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2542 	 * let another pass of the caller's loop give them a chance to be handled.
2543 	 * In any case, we'll return to the caller.
2544 	 */
2545 	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2546 	     JOBCTL_TRAP_FREEZE) {
2547 		spin_unlock_irq(&current->sighand->siglock);
2548 		return;
2549 	}
2550 
2551 	/*
2552 	 * Now we're sure that there is no pending fatal signal and no
2553 	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2554 	 * immediately (if there is a non-fatal signal pending), and
2555 	 * put the task into sleep.
2556 	 */
2557 	__set_current_state(TASK_INTERRUPTIBLE);
2558 	clear_thread_flag(TIF_SIGPENDING);
2559 	spin_unlock_irq(&current->sighand->siglock);
2560 	cgroup_enter_frozen();
2561 	freezable_schedule();
2562 }
2563 
2564 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2565 {
2566 	/*
2567 	 * We do not check sig_kernel_stop(signr) but set this marker
2568 	 * unconditionally because we do not know whether debugger will
2569 	 * change signr. This flag has no meaning unless we are going
2570 	 * to stop after return from ptrace_stop(). In this case it will
2571 	 * be checked in do_signal_stop(), we should only stop if it was
2572 	 * not cleared by SIGCONT while we were sleeping. See also the
2573 	 * comment in dequeue_signal().
2574 	 */
2575 	current->jobctl |= JOBCTL_STOP_DEQUEUED;
2576 	ptrace_stop(signr, CLD_TRAPPED, 0, info);
2577 
2578 	/* We're back.  Did the debugger cancel the sig?  */
2579 	signr = current->exit_code;
2580 	if (signr == 0)
2581 		return signr;
2582 
2583 	current->exit_code = 0;
2584 
2585 	/*
2586 	 * Update the siginfo structure if the signal has
2587 	 * changed.  If the debugger wanted something
2588 	 * specific in the siginfo structure then it should
2589 	 * have updated *info via PTRACE_SETSIGINFO.
2590 	 */
2591 	if (signr != info->si_signo) {
2592 		clear_siginfo(info);
2593 		info->si_signo = signr;
2594 		info->si_errno = 0;
2595 		info->si_code = SI_USER;
2596 		rcu_read_lock();
2597 		info->si_pid = task_pid_vnr(current->parent);
2598 		info->si_uid = from_kuid_munged(current_user_ns(),
2599 						task_uid(current->parent));
2600 		rcu_read_unlock();
2601 	}
2602 
2603 	/* If the (new) signal is now blocked, requeue it.  */
2604 	if (sigismember(&current->blocked, signr)) {
2605 		send_signal(signr, info, current, PIDTYPE_PID);
2606 		signr = 0;
2607 	}
2608 
2609 	return signr;
2610 }
2611 
2612 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2613 {
2614 	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2615 	case SIL_FAULT:
2616 	case SIL_FAULT_TRAPNO:
2617 	case SIL_FAULT_MCEERR:
2618 	case SIL_FAULT_BNDERR:
2619 	case SIL_FAULT_PKUERR:
2620 	case SIL_FAULT_PERF_EVENT:
2621 		ksig->info.si_addr = arch_untagged_si_addr(
2622 			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2623 		break;
2624 	case SIL_KILL:
2625 	case SIL_TIMER:
2626 	case SIL_POLL:
2627 	case SIL_CHLD:
2628 	case SIL_RT:
2629 	case SIL_SYS:
2630 		break;
2631 	}
2632 }
2633 
2634 bool get_signal(struct ksignal *ksig)
2635 {
2636 	struct sighand_struct *sighand = current->sighand;
2637 	struct signal_struct *signal = current->signal;
2638 	int signr;
2639 
2640 	if (unlikely(current->task_works))
2641 		task_work_run();
2642 
2643 	/*
2644 	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2645 	 * that the arch handlers don't all have to do it. If we get here
2646 	 * without TIF_SIGPENDING, just exit after running signal work.
2647 	 */
2648 	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2649 		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2650 			tracehook_notify_signal();
2651 		if (!task_sigpending(current))
2652 			return false;
2653 	}
2654 
2655 	if (unlikely(uprobe_deny_signal()))
2656 		return false;
2657 
2658 	/*
2659 	 * Do this once, we can't return to user-mode if freezing() == T.
2660 	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2661 	 * thus do not need another check after return.
2662 	 */
2663 	try_to_freeze();
2664 
2665 relock:
2666 	spin_lock_irq(&sighand->siglock);
2667 
2668 	/*
2669 	 * Every stopped thread goes here after wakeup. Check to see if
2670 	 * we should notify the parent, prepare_signal(SIGCONT) encodes
2671 	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2672 	 */
2673 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2674 		int why;
2675 
2676 		if (signal->flags & SIGNAL_CLD_CONTINUED)
2677 			why = CLD_CONTINUED;
2678 		else
2679 			why = CLD_STOPPED;
2680 
2681 		signal->flags &= ~SIGNAL_CLD_MASK;
2682 
2683 		spin_unlock_irq(&sighand->siglock);
2684 
2685 		/*
2686 		 * Notify the parent that we're continuing.  This event is
2687 		 * always per-process and doesn't make a whole lot of sense
2688 		 * for ptracers, who shouldn't consume the state via
2689 		 * wait(2) either, but, for backward compatibility, notify
2690 		 * the ptracer of the group leader too unless it's gonna be
2691 		 * a duplicate.
2692 		 */
2693 		read_lock(&tasklist_lock);
2694 		do_notify_parent_cldstop(current, false, why);
2695 
2696 		if (ptrace_reparented(current->group_leader))
2697 			do_notify_parent_cldstop(current->group_leader,
2698 						true, why);
2699 		read_unlock(&tasklist_lock);
2700 
2701 		goto relock;
2702 	}
2703 
2704 	/* Has this task already been marked for death? */
2705 	if (signal_group_exit(signal)) {
2706 		ksig->info.si_signo = signr = SIGKILL;
2707 		sigdelset(&current->pending.signal, SIGKILL);
2708 		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2709 				&sighand->action[SIGKILL - 1]);
2710 		recalc_sigpending();
2711 		goto fatal;
2712 	}
2713 
2714 	for (;;) {
2715 		struct k_sigaction *ka;
2716 
2717 		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2718 		    do_signal_stop(0))
2719 			goto relock;
2720 
2721 		if (unlikely(current->jobctl &
2722 			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2723 			if (current->jobctl & JOBCTL_TRAP_MASK) {
2724 				do_jobctl_trap();
2725 				spin_unlock_irq(&sighand->siglock);
2726 			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2727 				do_freezer_trap();
2728 
2729 			goto relock;
2730 		}
2731 
2732 		/*
2733 		 * If the task is leaving the frozen state, let's update
2734 		 * cgroup counters and reset the frozen bit.
2735 		 */
2736 		if (unlikely(cgroup_task_frozen(current))) {
2737 			spin_unlock_irq(&sighand->siglock);
2738 			cgroup_leave_frozen(false);
2739 			goto relock;
2740 		}
2741 
2742 		/*
2743 		 * Signals generated by the execution of an instruction
2744 		 * need to be delivered before any other pending signals
2745 		 * so that the instruction pointer in the signal stack
2746 		 * frame points to the faulting instruction.
2747 		 */
2748 		signr = dequeue_synchronous_signal(&ksig->info);
2749 		if (!signr)
2750 			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2751 
2752 		if (!signr)
2753 			break; /* will return 0 */
2754 
2755 		if (unlikely(current->ptrace) && signr != SIGKILL) {
2756 			signr = ptrace_signal(signr, &ksig->info);
2757 			if (!signr)
2758 				continue;
2759 		}
2760 
2761 		ka = &sighand->action[signr-1];
2762 
2763 		/* Trace actually delivered signals. */
2764 		trace_signal_deliver(signr, &ksig->info, ka);
2765 
2766 		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2767 			continue;
2768 		if (ka->sa.sa_handler != SIG_DFL) {
2769 			/* Run the handler.  */
2770 			ksig->ka = *ka;
2771 
2772 			if (ka->sa.sa_flags & SA_ONESHOT)
2773 				ka->sa.sa_handler = SIG_DFL;
2774 
2775 			break; /* will return non-zero "signr" value */
2776 		}
2777 
2778 		/*
2779 		 * Now we are doing the default action for this signal.
2780 		 */
2781 		if (sig_kernel_ignore(signr)) /* Default is nothing. */
2782 			continue;
2783 
2784 		/*
2785 		 * Global init gets no signals it doesn't want.
2786 		 * Container-init gets no signals it doesn't want from same
2787 		 * container.
2788 		 *
2789 		 * Note that if global/container-init sees a sig_kernel_only()
2790 		 * signal here, the signal must have been generated internally
2791 		 * or must have come from an ancestor namespace. In either
2792 		 * case, the signal cannot be dropped.
2793 		 */
2794 		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2795 				!sig_kernel_only(signr))
2796 			continue;
2797 
2798 		if (sig_kernel_stop(signr)) {
2799 			/*
2800 			 * The default action is to stop all threads in
2801 			 * the thread group.  The job control signals
2802 			 * do nothing in an orphaned pgrp, but SIGSTOP
2803 			 * always works.  Note that siglock needs to be
2804 			 * dropped during the call to is_orphaned_pgrp()
2805 			 * because of lock ordering with tasklist_lock.
2806 			 * This allows an intervening SIGCONT to be posted.
2807 			 * We need to check for that and bail out if necessary.
2808 			 */
2809 			if (signr != SIGSTOP) {
2810 				spin_unlock_irq(&sighand->siglock);
2811 
2812 				/* signals can be posted during this window */
2813 
2814 				if (is_current_pgrp_orphaned())
2815 					goto relock;
2816 
2817 				spin_lock_irq(&sighand->siglock);
2818 			}
2819 
2820 			if (likely(do_signal_stop(ksig->info.si_signo))) {
2821 				/* It released the siglock.  */
2822 				goto relock;
2823 			}
2824 
2825 			/*
2826 			 * We didn't actually stop, due to a race
2827 			 * with SIGCONT or something like that.
2828 			 */
2829 			continue;
2830 		}
2831 
2832 	fatal:
2833 		spin_unlock_irq(&sighand->siglock);
2834 		if (unlikely(cgroup_task_frozen(current)))
2835 			cgroup_leave_frozen(true);
2836 
2837 		/*
2838 		 * Anything else is fatal, maybe with a core dump.
2839 		 */
2840 		current->flags |= PF_SIGNALED;
2841 
2842 		if (sig_kernel_coredump(signr)) {
2843 			if (print_fatal_signals)
2844 				print_fatal_signal(ksig->info.si_signo);
2845 			proc_coredump_connector(current);
2846 			/*
2847 			 * If it was able to dump core, this kills all
2848 			 * other threads in the group and synchronizes with
2849 			 * their demise.  If we lost the race with another
2850 			 * thread getting here, it set group_exit_code
2851 			 * first and our do_group_exit call below will use
2852 			 * that value and ignore the one we pass it.
2853 			 */
2854 			do_coredump(&ksig->info);
2855 		}
2856 
2857 		/*
2858 		 * PF_IO_WORKER threads will catch and exit on fatal signals
2859 		 * themselves. They have cleanup that must be performed, so
2860 		 * we cannot call do_exit() on their behalf.
2861 		 */
2862 		if (current->flags & PF_IO_WORKER)
2863 			goto out;
2864 
2865 		/*
2866 		 * Death signals, no core dump.
2867 		 */
2868 		do_group_exit(ksig->info.si_signo);
2869 		/* NOTREACHED */
2870 	}
2871 	spin_unlock_irq(&sighand->siglock);
2872 out:
2873 	ksig->sig = signr;
2874 
2875 	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2876 		hide_si_addr_tag_bits(ksig);
2877 
2878 	return ksig->sig > 0;
2879 }
2880 
2881 /**
2882  * signal_delivered - perform bookkeeping after a signal has been delivered
2883  * @ksig:		kernel signal struct
2884  * @stepping:		nonzero if debugger single-step or block-step in use
2885  *
2886  * This function should be called when a signal has successfully been
2887  * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2888  * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2889  * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
2890  */
2891 static void signal_delivered(struct ksignal *ksig, int stepping)
2892 {
2893 	sigset_t blocked;
2894 
2895 	/* A signal was successfully delivered, and the
2896 	   saved sigmask was stored on the signal frame,
2897 	   and will be restored by sigreturn.  So we can
2898 	   simply clear the restore sigmask flag.  */
2899 	clear_restore_sigmask();
2900 
2901 	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2902 	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2903 		sigaddset(&blocked, ksig->sig);
2904 	set_current_blocked(&blocked);
2905 	if (current->sas_ss_flags & SS_AUTODISARM)
2906 		sas_ss_reset(current);
2907 	tracehook_signal_handler(stepping);
2908 }
2909 
2910 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2911 {
2912 	if (failed)
2913 		force_sigsegv(ksig->sig);
2914 	else
2915 		signal_delivered(ksig, stepping);
2916 }
2917 
2918 /*
2919  * It could be that complete_signal() picked us to notify about the
2920  * group-wide signal. Other threads should be notified now to take
2921  * the shared signals in @which since we will not.
2922  */
2923 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2924 {
2925 	sigset_t retarget;
2926 	struct task_struct *t;
2927 
2928 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2929 	if (sigisemptyset(&retarget))
2930 		return;
2931 
2932 	t = tsk;
2933 	while_each_thread(tsk, t) {
2934 		if (t->flags & PF_EXITING)
2935 			continue;
2936 
2937 		if (!has_pending_signals(&retarget, &t->blocked))
2938 			continue;
2939 		/* Remove the signals this thread can handle. */
2940 		sigandsets(&retarget, &retarget, &t->blocked);
2941 
2942 		if (!task_sigpending(t))
2943 			signal_wake_up(t, 0);
2944 
2945 		if (sigisemptyset(&retarget))
2946 			break;
2947 	}
2948 }
2949 
2950 void exit_signals(struct task_struct *tsk)
2951 {
2952 	int group_stop = 0;
2953 	sigset_t unblocked;
2954 
2955 	/*
2956 	 * @tsk is about to have PF_EXITING set - lock out users which
2957 	 * expect stable threadgroup.
2958 	 */
2959 	cgroup_threadgroup_change_begin(tsk);
2960 
2961 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2962 		tsk->flags |= PF_EXITING;
2963 		cgroup_threadgroup_change_end(tsk);
2964 		return;
2965 	}
2966 
2967 	spin_lock_irq(&tsk->sighand->siglock);
2968 	/*
2969 	 * From now this task is not visible for group-wide signals,
2970 	 * see wants_signal(), do_signal_stop().
2971 	 */
2972 	tsk->flags |= PF_EXITING;
2973 
2974 	cgroup_threadgroup_change_end(tsk);
2975 
2976 	if (!task_sigpending(tsk))
2977 		goto out;
2978 
2979 	unblocked = tsk->blocked;
2980 	signotset(&unblocked);
2981 	retarget_shared_pending(tsk, &unblocked);
2982 
2983 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2984 	    task_participate_group_stop(tsk))
2985 		group_stop = CLD_STOPPED;
2986 out:
2987 	spin_unlock_irq(&tsk->sighand->siglock);
2988 
2989 	/*
2990 	 * If group stop has completed, deliver the notification.  This
2991 	 * should always go to the real parent of the group leader.
2992 	 */
2993 	if (unlikely(group_stop)) {
2994 		read_lock(&tasklist_lock);
2995 		do_notify_parent_cldstop(tsk, false, group_stop);
2996 		read_unlock(&tasklist_lock);
2997 	}
2998 }
2999 
3000 /*
3001  * System call entry points.
3002  */
3003 
3004 /**
3005  *  sys_restart_syscall - restart a system call
3006  */
3007 SYSCALL_DEFINE0(restart_syscall)
3008 {
3009 	struct restart_block *restart = &current->restart_block;
3010 	return restart->fn(restart);
3011 }
3012 
3013 long do_no_restart_syscall(struct restart_block *param)
3014 {
3015 	return -EINTR;
3016 }
3017 
3018 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3019 {
3020 	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3021 		sigset_t newblocked;
3022 		/* A set of now blocked but previously unblocked signals. */
3023 		sigandnsets(&newblocked, newset, &current->blocked);
3024 		retarget_shared_pending(tsk, &newblocked);
3025 	}
3026 	tsk->blocked = *newset;
3027 	recalc_sigpending();
3028 }
3029 
3030 /**
3031  * set_current_blocked - change current->blocked mask
3032  * @newset: new mask
3033  *
3034  * It is wrong to change ->blocked directly; this helper should be used
3035  * to ensure the process can't miss a shared signal we are going to block.
3036  */
3037 void set_current_blocked(sigset_t *newset)
3038 {
3039 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3040 	__set_current_blocked(newset);
3041 }
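
/*
 * A minimal sketch of the intended usage (the caller and the mask being
 * added are hypothetical): build the new set from ->blocked instead of
 * writing ->blocked directly, then install it through this helper.
 *
 *	sigset_t newset;
 *
 *	sigorsets(&newset, &current->blocked, &mask_to_block);
 *	set_current_blocked(&newset);
 */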
3042 
3043 void __set_current_blocked(const sigset_t *newset)
3044 {
3045 	struct task_struct *tsk = current;
3046 
3047 	/*
3048 	 * In case the signal mask hasn't changed, there is nothing we need
3049 	 * to do. current->blocked shouldn't be modified by another task.
3050 	 */
3051 	if (sigequalsets(&tsk->blocked, newset))
3052 		return;
3053 
3054 	spin_lock_irq(&tsk->sighand->siglock);
3055 	__set_task_blocked(tsk, newset);
3056 	spin_unlock_irq(&tsk->sighand->siglock);
3057 }
3058 
3059 /*
3060  * This is also useful for kernel threads that want to temporarily
3061  * (or permanently) block certain signals.
3062  *
3063  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3064  * interface happily blocks "unblockable" signals like SIGKILL
3065  * and friends.
3066  */
3067 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3068 {
3069 	struct task_struct *tsk = current;
3070 	sigset_t newset;
3071 
3072 	/* Lockless, only current can change ->blocked, never from irq */
3073 	if (oldset)
3074 		*oldset = tsk->blocked;
3075 
3076 	switch (how) {
3077 	case SIG_BLOCK:
3078 		sigorsets(&newset, &tsk->blocked, set);
3079 		break;
3080 	case SIG_UNBLOCK:
3081 		sigandnsets(&newset, &tsk->blocked, set);
3082 		break;
3083 	case SIG_SETMASK:
3084 		newset = *set;
3085 		break;
3086 	default:
3087 		return -EINVAL;
3088 	}
3089 
3090 	__set_current_blocked(&newset);
3091 	return 0;
3092 }
3093 EXPORT_SYMBOL(sigprocmask);
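
/*
 * A minimal sketch of in-kernel usage (hypothetical kernel-thread context):
 * block every signal except SIGKILL while doing some work, then restore
 * the previous mask.
 *
 *	sigset_t blocked, oldset;
 *
 *	sigfillset(&blocked);
 *	sigdelset(&blocked, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &blocked, &oldset);
 *	... do the work ...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */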
3094 
3095 /*
3096  * This API helps set app-provided sigmasks.
3097  *
3098  * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3099  * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3100  *
3101  * Note that it does set_restore_sigmask() in advance, so it must always be
3102  * paired with restore_saved_sigmask_unless() before returning from the syscall.
3103  */
3104 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3105 {
3106 	sigset_t kmask;
3107 
3108 	if (!umask)
3109 		return 0;
3110 	if (sigsetsize != sizeof(sigset_t))
3111 		return -EINVAL;
3112 	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3113 		return -EFAULT;
3114 
3115 	set_restore_sigmask();
3116 	current->saved_sigmask = current->blocked;
3117 	set_current_blocked(&kmask);
3118 
3119 	return 0;
3120 }
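
/*
 * A minimal sketch of the required pairing, modeled on epoll_pwait()
 * (fs/eventpoll.c); the waiting call and its arguments are illustrative:
 *
 *	error = set_user_sigmask(sigmask, sigsetsize);
 *	if (error)
 *		return error;
 *
 *	error = do_epoll_wait(epfd, events, maxevents, to);
 *
 *	restore_saved_sigmask_unless(error == -EINTR);
 *	return error;
 */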
3121 
3122 #ifdef CONFIG_COMPAT
3123 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3124 			    size_t sigsetsize)
3125 {
3126 	sigset_t kmask;
3127 
3128 	if (!umask)
3129 		return 0;
3130 	if (sigsetsize != sizeof(compat_sigset_t))
3131 		return -EINVAL;
3132 	if (get_compat_sigset(&kmask, umask))
3133 		return -EFAULT;
3134 
3135 	set_restore_sigmask();
3136 	current->saved_sigmask = current->blocked;
3137 	set_current_blocked(&kmask);
3138 
3139 	return 0;
3140 }
3141 #endif
3142 
3143 /**
3144  *  sys_rt_sigprocmask - change the list of currently blocked signals
3145  *  @how: whether to add, remove, or set signals
3146  *  @nset: new signal mask to apply, or NULL to leave the mask unchanged
3147  *  @oset: previous value of signal mask if non-null
3148  *  @sigsetsize: size of sigset_t type
3149  */
3150 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3151 		sigset_t __user *, oset, size_t, sigsetsize)
3152 {
3153 	sigset_t old_set, new_set;
3154 	int error;
3155 
3156 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3157 	if (sigsetsize != sizeof(sigset_t))
3158 		return -EINVAL;
3159 
3160 	old_set = current->blocked;
3161 
3162 	if (nset) {
3163 		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3164 			return -EFAULT;
3165 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3166 
3167 		error = sigprocmask(how, &new_set, NULL);
3168 		if (error)
3169 			return error;
3170 	}
3171 
3172 	if (oset) {
3173 		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3174 			return -EFAULT;
3175 	}
3176 
3177 	return 0;
3178 }
3179 
3180 #ifdef CONFIG_COMPAT
3181 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3182 		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3183 {
3184 	sigset_t old_set = current->blocked;
3185 
3186 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3187 	if (sigsetsize != sizeof(sigset_t))
3188 		return -EINVAL;
3189 
3190 	if (nset) {
3191 		sigset_t new_set;
3192 		int error;
3193 		if (get_compat_sigset(&new_set, nset))
3194 			return -EFAULT;
3195 		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3196 
3197 		error = sigprocmask(how, &new_set, NULL);
3198 		if (error)
3199 			return error;
3200 	}
3201 	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3202 }
3203 #endif
3204 
3205 static void do_sigpending(sigset_t *set)
3206 {
3207 	spin_lock_irq(&current->sighand->siglock);
3208 	sigorsets(set, &current->pending.signal,
3209 		  &current->signal->shared_pending.signal);
3210 	spin_unlock_irq(&current->sighand->siglock);
3211 
3212 	/* Outside the lock because only this thread touches it.  */
3213 	sigandsets(set, &current->blocked, set);
3214 }
3215 
3216 /**
3217  *  sys_rt_sigpending - examine a pending signal that has been raised
3218  *			while blocked
3219  *  @uset: stores pending signals
3220  *  @sigsetsize: size of sigset_t type or smaller
3221  */
3222 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3223 {
3224 	sigset_t set;
3225 
3226 	if (sigsetsize > sizeof(*uset))
3227 		return -EINVAL;
3228 
3229 	do_sigpending(&set);
3230 
3231 	if (copy_to_user(uset, &set, sigsetsize))
3232 		return -EFAULT;
3233 
3234 	return 0;
3235 }
3236 
3237 #ifdef CONFIG_COMPAT
3238 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3239 		compat_size_t, sigsetsize)
3240 {
3241 	sigset_t set;
3242 
3243 	if (sigsetsize > sizeof(*uset))
3244 		return -EINVAL;
3245 
3246 	do_sigpending(&set);
3247 
3248 	return put_compat_sigset(uset, &set, sigsetsize);
3249 }
3250 #endif
3251 
3252 static const struct {
3253 	unsigned char limit, layout;
3254 } sig_sicodes[] = {
3255 	[SIGILL]  = { NSIGILL,  SIL_FAULT },
3256 	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
3257 	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3258 	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
3259 	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3260 #if defined(SIGEMT)
3261 	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
3262 #endif
3263 	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3264 	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
3265 	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
3266 };
3267 
3268 static bool known_siginfo_layout(unsigned sig, int si_code)
3269 {
3270 	if (si_code == SI_KERNEL)
3271 		return true;
3272 	else if ((si_code > SI_USER)) {
3273 		if (sig_specific_sicodes(sig)) {
3274 			if (si_code <= sig_sicodes[sig].limit)
3275 				return true;
3276 		}
3277 		else if (si_code <= NSIGPOLL)
3278 			return true;
3279 	}
3280 	else if (si_code >= SI_DETHREAD)
3281 		return true;
3282 	else if (si_code == SI_ASYNCNL)
3283 		return true;
3284 	return false;
3285 }
3286 
3287 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3288 {
3289 	enum siginfo_layout layout = SIL_KILL;
3290 	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3291 		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3292 		    (si_code <= sig_sicodes[sig].limit)) {
3293 			layout = sig_sicodes[sig].layout;
3294 			/* Handle the exceptions */
3295 			if ((sig == SIGBUS) &&
3296 			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3297 				layout = SIL_FAULT_MCEERR;
3298 			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3299 				layout = SIL_FAULT_BNDERR;
3300 #ifdef SEGV_PKUERR
3301 			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3302 				layout = SIL_FAULT_PKUERR;
3303 #endif
3304 			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3305 				layout = SIL_FAULT_PERF_EVENT;
3306 			else if (IS_ENABLED(CONFIG_SPARC) &&
3307 				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3308 				layout = SIL_FAULT_TRAPNO;
3309 			else if (IS_ENABLED(CONFIG_ALPHA) &&
3310 				 ((sig == SIGFPE) ||
3311 				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3312 				layout = SIL_FAULT_TRAPNO;
3313 		}
3314 		else if (si_code <= NSIGPOLL)
3315 			layout = SIL_POLL;
3316 	} else {
3317 		if (si_code == SI_TIMER)
3318 			layout = SIL_TIMER;
3319 		else if (si_code == SI_SIGIO)
3320 			layout = SIL_POLL;
3321 		else if (si_code < 0)
3322 			layout = SIL_RT;
3323 	}
3324 	return layout;
3325 }
3326 
3327 static inline char __user *si_expansion(const siginfo_t __user *info)
3328 {
3329 	return ((char __user *)info) + sizeof(struct kernel_siginfo);
3330 }
3331 
3332 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3333 {
3334 	char __user *expansion = si_expansion(to);
3335 	if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3336 		return -EFAULT;
3337 	if (clear_user(expansion, SI_EXPANSION_SIZE))
3338 		return -EFAULT;
3339 	return 0;
3340 }
3341 
3342 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3343 				       const siginfo_t __user *from)
3344 {
3345 	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3346 		char __user *expansion = si_expansion(from);
3347 		char buf[SI_EXPANSION_SIZE];
3348 		int i;
3349 		/*
3350 		 * An unknown si_code might need more than
3351 		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
3352 		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
3353 		 * will return this data to userspace exactly.
3354 		 */
3355 		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3356 			return -EFAULT;
3357 		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3358 			if (buf[i] != 0)
3359 				return -E2BIG;
3360 		}
3361 	}
3362 	return 0;
3363 }
3364 
3365 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3366 				    const siginfo_t __user *from)
3367 {
3368 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3369 		return -EFAULT;
3370 	to->si_signo = signo;
3371 	return post_copy_siginfo_from_user(to, from);
3372 }
3373 
3374 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3375 {
3376 	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3377 		return -EFAULT;
3378 	return post_copy_siginfo_from_user(to, from);
3379 }
3380 
3381 #ifdef CONFIG_COMPAT
3382 /**
3383  * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3384  * @to: compat siginfo destination
3385  * @from: kernel siginfo source
3386  *
3387  * Note: This function does not work properly for SIGCHLD on x32, but
3388  * fortunately it doesn't have to.  The only valid callers of this function are
3389  * copy_siginfo_to_user32 (which is overridden for x32) and the coredump code.
3390  * The latter does not care because SIGCHLD will never cause a coredump.
3391  */
3392 void copy_siginfo_to_external32(struct compat_siginfo *to,
3393 		const struct kernel_siginfo *from)
3394 {
3395 	memset(to, 0, sizeof(*to));
3396 
3397 	to->si_signo = from->si_signo;
3398 	to->si_errno = from->si_errno;
3399 	to->si_code  = from->si_code;
3400 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3401 	case SIL_KILL:
3402 		to->si_pid = from->si_pid;
3403 		to->si_uid = from->si_uid;
3404 		break;
3405 	case SIL_TIMER:
3406 		to->si_tid     = from->si_tid;
3407 		to->si_overrun = from->si_overrun;
3408 		to->si_int     = from->si_int;
3409 		break;
3410 	case SIL_POLL:
3411 		to->si_band = from->si_band;
3412 		to->si_fd   = from->si_fd;
3413 		break;
3414 	case SIL_FAULT:
3415 		to->si_addr = ptr_to_compat(from->si_addr);
3416 		break;
3417 	case SIL_FAULT_TRAPNO:
3418 		to->si_addr = ptr_to_compat(from->si_addr);
3419 		to->si_trapno = from->si_trapno;
3420 		break;
3421 	case SIL_FAULT_MCEERR:
3422 		to->si_addr = ptr_to_compat(from->si_addr);
3423 		to->si_addr_lsb = from->si_addr_lsb;
3424 		break;
3425 	case SIL_FAULT_BNDERR:
3426 		to->si_addr = ptr_to_compat(from->si_addr);
3427 		to->si_lower = ptr_to_compat(from->si_lower);
3428 		to->si_upper = ptr_to_compat(from->si_upper);
3429 		break;
3430 	case SIL_FAULT_PKUERR:
3431 		to->si_addr = ptr_to_compat(from->si_addr);
3432 		to->si_pkey = from->si_pkey;
3433 		break;
3434 	case SIL_FAULT_PERF_EVENT:
3435 		to->si_addr = ptr_to_compat(from->si_addr);
3436 		to->si_perf_data = from->si_perf_data;
3437 		to->si_perf_type = from->si_perf_type;
3438 		break;
3439 	case SIL_CHLD:
3440 		to->si_pid = from->si_pid;
3441 		to->si_uid = from->si_uid;
3442 		to->si_status = from->si_status;
3443 		to->si_utime = from->si_utime;
3444 		to->si_stime = from->si_stime;
3445 		break;
3446 	case SIL_RT:
3447 		to->si_pid = from->si_pid;
3448 		to->si_uid = from->si_uid;
3449 		to->si_int = from->si_int;
3450 		break;
3451 	case SIL_SYS:
3452 		to->si_call_addr = ptr_to_compat(from->si_call_addr);
3453 		to->si_syscall   = from->si_syscall;
3454 		to->si_arch      = from->si_arch;
3455 		break;
3456 	}
3457 }
3458 
3459 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3460 			   const struct kernel_siginfo *from)
3461 {
3462 	struct compat_siginfo new;
3463 
3464 	copy_siginfo_to_external32(&new, from);
3465 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3466 		return -EFAULT;
3467 	return 0;
3468 }
3469 
3470 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3471 					 const struct compat_siginfo *from)
3472 {
3473 	clear_siginfo(to);
3474 	to->si_signo = from->si_signo;
3475 	to->si_errno = from->si_errno;
3476 	to->si_code  = from->si_code;
3477 	switch(siginfo_layout(from->si_signo, from->si_code)) {
3478 	case SIL_KILL:
3479 		to->si_pid = from->si_pid;
3480 		to->si_uid = from->si_uid;
3481 		break;
3482 	case SIL_TIMER:
3483 		to->si_tid     = from->si_tid;
3484 		to->si_overrun = from->si_overrun;
3485 		to->si_int     = from->si_int;
3486 		break;
3487 	case SIL_POLL:
3488 		to->si_band = from->si_band;
3489 		to->si_fd   = from->si_fd;
3490 		break;
3491 	case SIL_FAULT:
3492 		to->si_addr = compat_ptr(from->si_addr);
3493 		break;
3494 	case SIL_FAULT_TRAPNO:
3495 		to->si_addr = compat_ptr(from->si_addr);
3496 		to->si_trapno = from->si_trapno;
3497 		break;
3498 	case SIL_FAULT_MCEERR:
3499 		to->si_addr = compat_ptr(from->si_addr);
3500 		to->si_addr_lsb = from->si_addr_lsb;
3501 		break;
3502 	case SIL_FAULT_BNDERR:
3503 		to->si_addr = compat_ptr(from->si_addr);
3504 		to->si_lower = compat_ptr(from->si_lower);
3505 		to->si_upper = compat_ptr(from->si_upper);
3506 		break;
3507 	case SIL_FAULT_PKUERR:
3508 		to->si_addr = compat_ptr(from->si_addr);
3509 		to->si_pkey = from->si_pkey;
3510 		break;
3511 	case SIL_FAULT_PERF_EVENT:
3512 		to->si_addr = compat_ptr(from->si_addr);
3513 		to->si_perf_data = from->si_perf_data;
3514 		to->si_perf_type = from->si_perf_type;
3515 		break;
3516 	case SIL_CHLD:
3517 		to->si_pid    = from->si_pid;
3518 		to->si_uid    = from->si_uid;
3519 		to->si_status = from->si_status;
3520 #ifdef CONFIG_X86_X32_ABI
3521 		if (in_x32_syscall()) {
3522 			to->si_utime = from->_sifields._sigchld_x32._utime;
3523 			to->si_stime = from->_sifields._sigchld_x32._stime;
3524 		} else
3525 #endif
3526 		{
3527 			to->si_utime = from->si_utime;
3528 			to->si_stime = from->si_stime;
3529 		}
3530 		break;
3531 	case SIL_RT:
3532 		to->si_pid = from->si_pid;
3533 		to->si_uid = from->si_uid;
3534 		to->si_int = from->si_int;
3535 		break;
3536 	case SIL_SYS:
3537 		to->si_call_addr = compat_ptr(from->si_call_addr);
3538 		to->si_syscall   = from->si_syscall;
3539 		to->si_arch      = from->si_arch;
3540 		break;
3541 	}
3542 	return 0;
3543 }
3544 
3545 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3546 				      const struct compat_siginfo __user *ufrom)
3547 {
3548 	struct compat_siginfo from;
3549 
3550 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3551 		return -EFAULT;
3552 
3553 	from.si_signo = signo;
3554 	return post_copy_siginfo_from_user32(to, &from);
3555 }
3556 
3557 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3558 			     const struct compat_siginfo __user *ufrom)
3559 {
3560 	struct compat_siginfo from;
3561 
3562 	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3563 		return -EFAULT;
3564 
3565 	return post_copy_siginfo_from_user32(to, &from);
3566 }
3567 #endif /* CONFIG_COMPAT */
3568 
3569 /**
3570  *  do_sigtimedwait - wait for queued signals specified in @which
3571  *  @which: queued signals to wait for
3572  *  @info: if non-null, the signal's siginfo is returned here
3573  *  @ts: upper bound on process time suspension
3574  */
3575 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3576 		    const struct timespec64 *ts)
3577 {
3578 	ktime_t *to = NULL, timeout = KTIME_MAX;
3579 	struct task_struct *tsk = current;
3580 	sigset_t mask = *which;
3581 	int sig, ret = 0;
3582 
3583 	if (ts) {
3584 		if (!timespec64_valid(ts))
3585 			return -EINVAL;
3586 		timeout = timespec64_to_ktime(*ts);
3587 		to = &timeout;
3588 	}
3589 
3590 	/*
3591 	 * Invert the set of allowed signals to get those we want to block.
3592 	 */
3593 	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3594 	signotset(&mask);
3595 
3596 	spin_lock_irq(&tsk->sighand->siglock);
3597 	sig = dequeue_signal(tsk, &mask, info);
3598 	if (!sig && timeout) {
3599 		/*
3600 		 * None ready, temporarily unblock those we're interested in
3601 		 * while we are sleeping so that we'll be awakened when
3602 		 * they arrive. Unblocking is always fine, we can avoid
3603 		 * set_current_blocked().
3604 		 */
3605 		tsk->real_blocked = tsk->blocked;
3606 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3607 		recalc_sigpending();
3608 		spin_unlock_irq(&tsk->sighand->siglock);
3609 
3610 		__set_current_state(TASK_INTERRUPTIBLE);
3611 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3612 							 HRTIMER_MODE_REL);
3613 		spin_lock_irq(&tsk->sighand->siglock);
3614 		__set_task_blocked(tsk, &tsk->real_blocked);
3615 		sigemptyset(&tsk->real_blocked);
3616 		sig = dequeue_signal(tsk, &mask, info);
3617 	}
3618 	spin_unlock_irq(&tsk->sighand->siglock);
3619 
3620 	if (sig)
3621 		return sig;
3622 	return ret ? -EINTR : -EAGAIN;
3623 }
3624 
3625 /**
3626  *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3627  *			in @uthese
3628  *  @uthese: queued signals to wait for
3629  *  @uinfo: if non-null, the signal's siginfo is returned here
3630  *  @uts: upper bound on process time suspension
3631  *  @uts: upper bound on how long the process may remain suspended
3632  */
3633 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3634 		siginfo_t __user *, uinfo,
3635 		const struct __kernel_timespec __user *, uts,
3636 		size_t, sigsetsize)
3637 {
3638 	sigset_t these;
3639 	struct timespec64 ts;
3640 	kernel_siginfo_t info;
3641 	int ret;
3642 
3643 	/* XXX: Don't preclude handling different sized sigset_t's.  */
3644 	if (sigsetsize != sizeof(sigset_t))
3645 		return -EINVAL;
3646 
3647 	if (copy_from_user(&these, uthese, sizeof(these)))
3648 		return -EFAULT;
3649 
3650 	if (uts) {
3651 		if (get_timespec64(&ts, uts))
3652 			return -EFAULT;
3653 	}
3654 
3655 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3656 
3657 	if (ret > 0 && uinfo) {
3658 		if (copy_siginfo_to_user(uinfo, &info))
3659 			ret = -EFAULT;
3660 	}
3661 
3662 	return ret;
3663 }
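
/*
 * A minimal userspace sketch of the usual calling pattern behind this
 * syscall, assuming the libc sigtimedwait() wrapper and an arbitrary
 * five-second timeout: block the signals of interest first so they stay
 * queued, then dequeue them synchronously.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *		int sig;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		// Keep SIGUSR1 blocked so it stays pending until dequeued here.
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *		sig = sigtimedwait(&set, &info, &ts);
 *		if (sig < 0)
 *			perror("sigtimedwait");	// EAGAIN on timeout, EINTR otherwise
 *		else
 *			printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
 *		return 0;
 *	}
 */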
3664 
3665 #ifdef CONFIG_COMPAT_32BIT_TIME
3666 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3667 		siginfo_t __user *, uinfo,
3668 		const struct old_timespec32 __user *, uts,
3669 		size_t, sigsetsize)
3670 {
3671 	sigset_t these;
3672 	struct timespec64 ts;
3673 	kernel_siginfo_t info;
3674 	int ret;
3675 
3676 	if (sigsetsize != sizeof(sigset_t))
3677 		return -EINVAL;
3678 
3679 	if (copy_from_user(&these, uthese, sizeof(these)))
3680 		return -EFAULT;
3681 
3682 	if (uts) {
3683 		if (get_old_timespec32(&ts, uts))
3684 			return -EFAULT;
3685 	}
3686 
3687 	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3688 
3689 	if (ret > 0 && uinfo) {
3690 		if (copy_siginfo_to_user(uinfo, &info))
3691 			ret = -EFAULT;
3692 	}
3693 
3694 	return ret;
3695 }
3696 #endif
3697 
3698 #ifdef CONFIG_COMPAT
3699 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3700 		struct compat_siginfo __user *, uinfo,
3701 		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3702 {
3703 	sigset_t s;
3704 	struct timespec64 t;
3705 	kernel_siginfo_t info;
3706 	long ret;
3707 
3708 	if (sigsetsize != sizeof(sigset_t))
3709 		return -EINVAL;
3710 
3711 	if (get_compat_sigset(&s, uthese))
3712 		return -EFAULT;
3713 
3714 	if (uts) {
3715 		if (get_timespec64(&t, uts))
3716 			return -EFAULT;
3717 	}
3718 
3719 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3720 
3721 	if (ret > 0 && uinfo) {
3722 		if (copy_siginfo_to_user32(uinfo, &info))
3723 			ret = -EFAULT;
3724 	}
3725 
3726 	return ret;
3727 }
3728 
3729 #ifdef CONFIG_COMPAT_32BIT_TIME
3730 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3731 		struct compat_siginfo __user *, uinfo,
3732 		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3733 {
3734 	sigset_t s;
3735 	struct timespec64 t;
3736 	kernel_siginfo_t info;
3737 	long ret;
3738 
3739 	if (sigsetsize != sizeof(sigset_t))
3740 		return -EINVAL;
3741 
3742 	if (get_compat_sigset(&s, uthese))
3743 		return -EFAULT;
3744 
3745 	if (uts) {
3746 		if (get_old_timespec32(&t, uts))
3747 			return -EFAULT;
3748 	}
3749 
3750 	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3751 
3752 	if (ret > 0 && uinfo) {
3753 		if (copy_siginfo_to_user32(uinfo, &info))
3754 			ret = -EFAULT;
3755 	}
3756 
3757 	return ret;
3758 }
3759 #endif
3760 #endif
3761 
3762 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3763 {
3764 	clear_siginfo(info);
3765 	info->si_signo = sig;
3766 	info->si_errno = 0;
3767 	info->si_code = SI_USER;
3768 	info->si_pid = task_tgid_vnr(current);
3769 	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3770 }
3771 
3772 /**
3773  *  sys_kill - send a signal to a process
3774  *  @pid: the PID of the process
3775  *  @sig: signal to be sent
3776  */
3777 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3778 {
3779 	struct kernel_siginfo info;
3780 
3781 	prepare_kill_siginfo(sig, &info);
3782 
3783 	return kill_something_info(sig, &info, pid);
3784 }
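
/*
 * A small usage sketch, assuming the standard kill(2) wrapper from
 * <signal.h> and a hypothetical target PID of 1234. Signal 0 performs
 * only the permission/existence probe; a real signal number is delivered.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *
 *	int main(void)
 *	{
 *		pid_t target = 1234;	// placeholder PID
 *
 *		if (kill(target, 0) == 0)
 *			printf("process %d exists and is signalable\n", (int)target);
 *		if (kill(target, SIGTERM) != 0)
 *			perror("kill");
 *		return 0;
 *	}
 */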
3785 
3786 /*
3787  * Verify that the signaler and signalee either are in the same pid namespace
3788  * or that the signaler's pid namespace is an ancestor of the signalee's pid
3789  * namespace.
3790  */
3791 static bool access_pidfd_pidns(struct pid *pid)
3792 {
3793 	struct pid_namespace *active = task_active_pid_ns(current);
3794 	struct pid_namespace *p = ns_of_pid(pid);
3795 
3796 	for (;;) {
3797 		if (!p)
3798 			return false;
3799 		if (p == active)
3800 			break;
3801 		p = p->parent;
3802 	}
3803 
3804 	return true;
3805 }
3806 
3807 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3808 		siginfo_t __user *info)
3809 {
3810 #ifdef CONFIG_COMPAT
3811 	/*
3812 	 * Avoid hooking up compat syscalls and instead handle necessary
3813 	 * conversions here. Note, this is a stop-gap measure and should not be
3814 	 * considered a generic solution.
3815 	 */
3816 	if (in_compat_syscall())
3817 		return copy_siginfo_from_user32(
3818 			kinfo, (struct compat_siginfo __user *)info);
3819 #endif
3820 	return copy_siginfo_from_user(kinfo, info);
3821 }
3822 
3823 static struct pid *pidfd_to_pid(const struct file *file)
3824 {
3825 	struct pid *pid;
3826 
3827 	pid = pidfd_pid(file);
3828 	if (!IS_ERR(pid))
3829 		return pid;
3830 
3831 	return tgid_pidfd_to_pid(file);
3832 }
3833 
3834 /**
3835  * sys_pidfd_send_signal - Signal a process through a pidfd
3836  * @pidfd:  file descriptor of the process
3837  * @sig:    signal to send
3838  * @info:   signal info
3839  * @flags:  future flags
3840  *
3841  * The syscall currently only signals via PIDTYPE_PID which covers
3842  * kill(<positive-pid>, <signal>). It does not signal threads or process
3843  * groups.
3844  * In order to extend the syscall to threads and process groups the @flags
3845  * argument should be used. In essence, the @flags argument will determine
3846  * what is signaled and not the file descriptor itself. In other words,
3847  * grouping is a property of the flags argument, not a property of the file
3848  * descriptor.
3849  *
3850  * Return: 0 on success, negative errno on failure
3851  */
3852 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3853 		siginfo_t __user *, info, unsigned int, flags)
3854 {
3855 	int ret;
3856 	struct fd f;
3857 	struct pid *pid;
3858 	kernel_siginfo_t kinfo;
3859 
3860 	/* Enforce that flags is 0 until we add an extension. */
3861 	if (flags)
3862 		return -EINVAL;
3863 
3864 	f = fdget(pidfd);
3865 	if (!f.file)
3866 		return -EBADF;
3867 
3868 	/* Is this a pidfd? */
3869 	pid = pidfd_to_pid(f.file);
3870 	if (IS_ERR(pid)) {
3871 		ret = PTR_ERR(pid);
3872 		goto err;
3873 	}
3874 
3875 	ret = -EINVAL;
3876 	if (!access_pidfd_pidns(pid))
3877 		goto err;
3878 
3879 	if (info) {
3880 		ret = copy_siginfo_from_user_any(&kinfo, info);
3881 		if (unlikely(ret))
3882 			goto err;
3883 
3884 		ret = -EINVAL;
3885 		if (unlikely(sig != kinfo.si_signo))
3886 			goto err;
3887 
3888 		/* Only allow sending arbitrary signals to yourself. */
3889 		ret = -EPERM;
3890 		if ((task_pid(current) != pid) &&
3891 		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3892 			goto err;
3893 	} else {
3894 		prepare_kill_siginfo(sig, &kinfo);
3895 	}
3896 
3897 	ret = kill_pid_info(sig, &kinfo, pid);
3898 
3899 err:
3900 	fdput(f);
3901 	return ret;
3902 }
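
/*
 * A hedged userspace sketch of the PIDTYPE_PID case described above,
 * using the raw syscall(2) interface since libc wrappers for these
 * calls are not universally available; the target PID is a placeholder
 * and SYS_pidfd_open/SYS_pidfd_send_signal are assumed to be defined
 * by <sys/syscall.h>.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		pid_t target = 1234;	// placeholder PID
 *		int pidfd;
 *
 *		pidfd = syscall(SYS_pidfd_open, target, 0);
 *		if (pidfd < 0) {
 *			perror("pidfd_open");
 *			return 1;
 *		}
 *		// With a NULL info this behaves like kill(target, SIGTERM);
 *		// flags must currently be 0.
 *		if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
 *			perror("pidfd_send_signal");
 *		close(pidfd);
 *		return 0;
 *	}
 */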
3903 
3904 static int
3905 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3906 {
3907 	struct task_struct *p;
3908 	int error = -ESRCH;
3909 
3910 	rcu_read_lock();
3911 	p = find_task_by_vpid(pid);
3912 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3913 		error = check_kill_permission(sig, info, p);
3914 		/*
3915 		 * The null signal is a permissions and process existence
3916 		 * probe.  No signal is actually delivered.
3917 		 */
3918 		if (!error && sig) {
3919 			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3920 			/*
3921 			 * If lock_task_sighand() failed we pretend the task
3922 			 * dies after receiving the signal. The window is tiny,
3923 			 * and the signal is private anyway.
3924 			 */
3925 			if (unlikely(error == -ESRCH))
3926 				error = 0;
3927 		}
3928 	}
3929 	rcu_read_unlock();
3930 
3931 	return error;
3932 }
3933 
3934 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3935 {
3936 	struct kernel_siginfo info;
3937 
3938 	clear_siginfo(&info);
3939 	info.si_signo = sig;
3940 	info.si_errno = 0;
3941 	info.si_code = SI_TKILL;
3942 	info.si_pid = task_tgid_vnr(current);
3943 	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3944 
3945 	return do_send_specific(tgid, pid, sig, &info);
3946 }
3947 
3948 /**
3949  *  sys_tgkill - send signal to one specific thread
3950  *  @tgid: the thread group ID of the thread
3951  *  @pid: the PID of the thread
3952  *  @sig: signal to be sent
3953  *
3954  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
3955  *  exists but no longer belongs to the target process. This
3956  *  method solves the problem of threads exiting and PIDs getting reused.
3957  */
3958 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3959 {
3960 	/* This is only valid for single tasks */
3961 	if (pid <= 0 || tgid <= 0)
3962 		return -EINVAL;
3963 
3964 	return do_tkill(tgid, pid, sig);
3965 }
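
/*
 * A sketch of directing a signal at one specific thread, assuming the
 * raw syscall(2) interface (older libcs do not wrap tgkill or gettid).
 * Here a thread signals itself, so the tgid/tid pair is known to match.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		pid_t tgid = getpid();
 *		pid_t tid = syscall(SYS_gettid);
 *
 *		signal(SIGUSR1, SIG_IGN);	// avoid the default terminating action
 *		if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) != 0)
 *			perror("tgkill");
 *		return 0;
 *	}
 */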
3966 
3967 /**
3968  *  sys_tkill - send signal to one specific task
3969  *  @pid: the PID of the task
3970  *  @sig: signal to be sent
3971  *
3972  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3973  */
3974 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3975 {
3976 	/* This is only valid for single tasks */
3977 	if (pid <= 0)
3978 		return -EINVAL;
3979 
3980 	return do_tkill(0, pid, sig);
3981 }
3982 
3983 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3984 {
3985 	/* Not even root can pretend to send signals from the kernel.
3986 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3987 	 */
3988 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3989 	    (task_pid_vnr(current) != pid))
3990 		return -EPERM;
3991 
3992 	/* POSIX.1b doesn't mention process groups.  */
3993 	return kill_proc_info(sig, info, pid);
3994 }
3995 
3996 /**
3997  *  sys_rt_sigqueueinfo - queue a signal and data to a process
3998  *  @pid: the PID of the target process
3999  *  @sig: signal to be sent
4000  *  @uinfo: signal info to be sent
4001  */
4002 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4003 		siginfo_t __user *, uinfo)
4004 {
4005 	kernel_siginfo_t info;
4006 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4007 	if (unlikely(ret))
4008 		return ret;
4009 	return do_rt_sigqueueinfo(pid, sig, &info);
4010 }
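
/*
 * A userspace sketch of the common path into this syscall, assuming
 * glibc's sigqueue() wrapper (which builds a siginfo with si_code set
 * to SI_QUEUE, a negative value, and therefore passes the check in
 * do_rt_sigqueueinfo() above); the target PID is a placeholder.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		pid_t target = 1234;				// placeholder PID
 *		union sigval value = { .sival_int = 42 };	// payload in si_int
 *
 *		if (sigqueue(target, SIGUSR1, value) != 0)
 *			perror("sigqueue");
 *		return 0;
 *	}
 */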
4011 
4012 #ifdef CONFIG_COMPAT
4013 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4014 			compat_pid_t, pid,
4015 			int, sig,
4016 			struct compat_siginfo __user *, uinfo)
4017 {
4018 	kernel_siginfo_t info;
4019 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4020 	if (unlikely(ret))
4021 		return ret;
4022 	return do_rt_sigqueueinfo(pid, sig, &info);
4023 }
4024 #endif
4025 
4026 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4027 {
4028 	/* This is only valid for single tasks */
4029 	if (pid <= 0 || tgid <= 0)
4030 		return -EINVAL;
4031 
4032 	/* Not even root can pretend to send signals from the kernel.
4033 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4034 	 */
4035 	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4036 	    (task_pid_vnr(current) != pid))
4037 		return -EPERM;
4038 
4039 	return do_send_specific(tgid, pid, sig, info);
4040 }
4041 
4042 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4043 		siginfo_t __user *, uinfo)
4044 {
4045 	kernel_siginfo_t info;
4046 	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4047 	if (unlikely(ret))
4048 		return ret;
4049 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4050 }
4051 
4052 #ifdef CONFIG_COMPAT
4053 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4054 			compat_pid_t, tgid,
4055 			compat_pid_t, pid,
4056 			int, sig,
4057 			struct compat_siginfo __user *, uinfo)
4058 {
4059 	kernel_siginfo_t info;
4060 	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4061 	if (unlikely(ret))
4062 		return ret;
4063 	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4064 }
4065 #endif
4066 
4067 /*
4068  * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4069  */
4070 void kernel_sigaction(int sig, __sighandler_t action)
4071 {
4072 	spin_lock_irq(&current->sighand->siglock);
4073 	current->sighand->action[sig - 1].sa.sa_handler = action;
4074 	if (action == SIG_IGN) {
4075 		sigset_t mask;
4076 
4077 		sigemptyset(&mask);
4078 		sigaddset(&mask, sig);
4079 
4080 		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4081 		flush_sigqueue_mask(&mask, &current->pending);
4082 		recalc_sigpending();
4083 	}
4084 	spin_unlock_irq(&current->sighand->siglock);
4085 }
4086 EXPORT_SYMBOL(kernel_sigaction);
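
/*
 * A sketch of the intended kernel-side use, via the allow_signal() and
 * disallow_signal() helpers (inline wrappers around kernel_sigaction()):
 * a kthread that was not cloned with CLONE_SIGHAND opts in to SIGTERM
 * and polls for it. The thread function below is purely illustrative.
 *
 *	static int example_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current)) {
 *				// react to SIGTERM, then keep running
 *				flush_signals(current);
 *			}
 *		}
 *		disallow_signal(SIGTERM);
 *		return 0;
 *	}
 */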
4087 
4088 void __weak sigaction_compat_abi(struct k_sigaction *act,
4089 		struct k_sigaction *oact)
4090 {
4091 }
4092 
4093 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4094 {
4095 	struct task_struct *p = current, *t;
4096 	struct k_sigaction *k;
4097 	sigset_t mask;
4098 
4099 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4100 		return -EINVAL;
4101 
4102 	k = &p->sighand->action[sig-1];
4103 
4104 	spin_lock_irq(&p->sighand->siglock);
4105 	if (oact)
4106 		*oact = *k;
4107 
4108 	/*
4109 	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4110 	 * e.g. by having an architecture use the bit in their uapi.
4111 	 */
4112 	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4113 
4114 	/*
4115 	 * Clear unknown flag bits in order to allow userspace to detect missing
4116 	 * support for flag bits and to allow the kernel to use non-uapi bits
4117 	 * internally.
4118 	 */
4119 	if (act)
4120 		act->sa.sa_flags &= UAPI_SA_FLAGS;
4121 	if (oact)
4122 		oact->sa.sa_flags &= UAPI_SA_FLAGS;
4123 
4124 	sigaction_compat_abi(act, oact);
4125 
4126 	if (act) {
4127 		sigdelsetmask(&act->sa.sa_mask,
4128 			      sigmask(SIGKILL) | sigmask(SIGSTOP));
4129 		*k = *act;
4130 		/*
4131 		 * POSIX 3.3.1.3:
4132 		 *  "Setting a signal action to SIG_IGN for a signal that is
4133 		 *   pending shall cause the pending signal to be discarded,
4134 		 *   whether or not it is blocked."
4135 		 *
4136 		 *  "Setting a signal action to SIG_DFL for a signal that is
4137 		 *   pending and whose default action is to ignore the signal
4138 		 *   (for example, SIGCHLD), shall cause the pending signal to
4139 		 *   be discarded, whether or not it is blocked"
4140 		 */
4141 		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4142 			sigemptyset(&mask);
4143 			sigaddset(&mask, sig);
4144 			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4145 			for_each_thread(p, t)
4146 				flush_sigqueue_mask(&mask, &t->pending);
4147 		}
4148 	}
4149 
4150 	spin_unlock_irq(&p->sighand->siglock);
4151 	return 0;
4152 }
4153 
4154 static int
4155 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4156 		size_t min_ss_size)
4157 {
4158 	struct task_struct *t = current;
4159 
4160 	if (oss) {
4161 		memset(oss, 0, sizeof(stack_t));
4162 		oss->ss_sp = (void __user *) t->sas_ss_sp;
4163 		oss->ss_size = t->sas_ss_size;
4164 		oss->ss_flags = sas_ss_flags(sp) |
4165 			(current->sas_ss_flags & SS_FLAG_BITS);
4166 	}
4167 
4168 	if (ss) {
4169 		void __user *ss_sp = ss->ss_sp;
4170 		size_t ss_size = ss->ss_size;
4171 		unsigned ss_flags = ss->ss_flags;
4172 		int ss_mode;
4173 
4174 		if (unlikely(on_sig_stack(sp)))
4175 			return -EPERM;
4176 
4177 		ss_mode = ss_flags & ~SS_FLAG_BITS;
4178 		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4179 				ss_mode != 0))
4180 			return -EINVAL;
4181 
4182 		if (ss_mode == SS_DISABLE) {
4183 			ss_size = 0;
4184 			ss_sp = NULL;
4185 		} else {
4186 			if (unlikely(ss_size < min_ss_size))
4187 				return -ENOMEM;
4188 		}
4189 
4190 		t->sas_ss_sp = (unsigned long) ss_sp;
4191 		t->sas_ss_size = ss_size;
4192 		t->sas_ss_flags = ss_flags;
4193 	}
4194 	return 0;
4195 }
4196 
4197 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4198 {
4199 	stack_t new, old;
4200 	int err;
4201 	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4202 		return -EFAULT;
4203 	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4204 			      current_user_stack_pointer(),
4205 			      MINSIGSTKSZ);
4206 	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4207 		err = -EFAULT;
4208 	return err;
4209 }
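
/*
 * A userspace sketch of the typical use, assuming the libc sigaltstack()
 * and sigaction() wrappers: install an alternate stack so a SIGSEGV
 * handler can run even after the main stack has overflowed.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig)
 *	{
 *		static const char msg[] = "caught SIGSEGV on the alternate stack\n";
 *
 *		// Safe even on stack overflow: we are running on ss_sp.
 *		write(2, msg, sizeof(msg) - 1);
 *		_exit(1);
 *	}
 *
 *	int main(void)
 *	{
 *		stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *		struct sigaction sa;
 *
 *		sigaltstack(&ss, NULL);
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = on_segv;
 *		sa.sa_flags = SA_ONSTACK;	// deliver SIGSEGV on the stack above
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *
 *		// ... code that might fault is now handled on the alternate stack
 *		return 0;
 *	}
 */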
4210 
4211 int restore_altstack(const stack_t __user *uss)
4212 {
4213 	stack_t new;
4214 	if (copy_from_user(&new, uss, sizeof(stack_t)))
4215 		return -EFAULT;
4216 	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4217 			     MINSIGSTKSZ);
4218 	/* squash all but EFAULT for now */
4219 	return 0;
4220 }
4221 
4222 int __save_altstack(stack_t __user *uss, unsigned long sp)
4223 {
4224 	struct task_struct *t = current;
4225 	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4226 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4227 		__put_user(t->sas_ss_size, &uss->ss_size);
4228 	return err;
4229 }
4230 
4231 #ifdef CONFIG_COMPAT
4232 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4233 				 compat_stack_t __user *uoss_ptr)
4234 {
4235 	stack_t uss, uoss;
4236 	int ret;
4237 
4238 	if (uss_ptr) {
4239 		compat_stack_t uss32;
4240 		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4241 			return -EFAULT;
4242 		uss.ss_sp = compat_ptr(uss32.ss_sp);
4243 		uss.ss_flags = uss32.ss_flags;
4244 		uss.ss_size = uss32.ss_size;
4245 	}
4246 	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4247 			     compat_user_stack_pointer(),
4248 			     COMPAT_MINSIGSTKSZ);
4249 	if (ret >= 0 && uoss_ptr)  {
4250 		compat_stack_t old;
4251 		memset(&old, 0, sizeof(old));
4252 		old.ss_sp = ptr_to_compat(uoss.ss_sp);
4253 		old.ss_flags = uoss.ss_flags;
4254 		old.ss_size = uoss.ss_size;
4255 		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4256 			ret = -EFAULT;
4257 	}
4258 	return ret;
4259 }
4260 
4261 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4262 			const compat_stack_t __user *, uss_ptr,
4263 			compat_stack_t __user *, uoss_ptr)
4264 {
4265 	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4266 }
4267 
4268 int compat_restore_altstack(const compat_stack_t __user *uss)
4269 {
4270 	int err = do_compat_sigaltstack(uss, NULL);
4271 	/* squash all but -EFAULT for now */
4272 	return err == -EFAULT ? err : 0;
4273 }
4274 
4275 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4276 {
4277 	int err;
4278 	struct task_struct *t = current;
4279 	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4280 			 &uss->ss_sp) |
4281 		__put_user(t->sas_ss_flags, &uss->ss_flags) |
4282 		__put_user(t->sas_ss_size, &uss->ss_size);
4283 	return err;
4284 }
4285 #endif
4286 
4287 #ifdef __ARCH_WANT_SYS_SIGPENDING
4288 
4289 /**
4290  *  sys_sigpending - examine pending signals
4291  *  @uset: where the mask of pending signals is returned
4292  */
4293 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4294 {
4295 	sigset_t set;
4296 
4297 	if (sizeof(old_sigset_t) > sizeof(*uset))
4298 		return -EINVAL;
4299 
4300 	do_sigpending(&set);
4301 
4302 	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4303 		return -EFAULT;
4304 
4305 	return 0;
4306 }
4307 
4308 #ifdef CONFIG_COMPAT
4309 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4310 {
4311 	sigset_t set;
4312 
4313 	do_sigpending(&set);
4314 
4315 	return put_user(set.sig[0], set32);
4316 }
4317 #endif
4318 
4319 #endif
4320 
4321 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4322 /**
4323  *  sys_sigprocmask - examine and change blocked signals
4324  *  @how: whether to add, remove, or set signals
4325  *  @nset: signals to add or remove (if non-null)
4326  *  @oset: previous value of signal mask if non-null
4327  *
4328  * Some platforms have their own version with special arguments;
4329  * others support only sys_rt_sigprocmask.
4330  */
4331 
4332 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4333 		old_sigset_t __user *, oset)
4334 {
4335 	old_sigset_t old_set, new_set;
4336 	sigset_t new_blocked;
4337 
4338 	old_set = current->blocked.sig[0];
4339 
4340 	if (nset) {
4341 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
4342 			return -EFAULT;
4343 
4344 		new_blocked = current->blocked;
4345 
4346 		switch (how) {
4347 		case SIG_BLOCK:
4348 			sigaddsetmask(&new_blocked, new_set);
4349 			break;
4350 		case SIG_UNBLOCK:
4351 			sigdelsetmask(&new_blocked, new_set);
4352 			break;
4353 		case SIG_SETMASK:
4354 			new_blocked.sig[0] = new_set;
4355 			break;
4356 		default:
4357 			return -EINVAL;
4358 		}
4359 
4360 		set_current_blocked(&new_blocked);
4361 	}
4362 
4363 	if (oset) {
4364 		if (copy_to_user(oset, &old_set, sizeof(*oset)))
4365 			return -EFAULT;
4366 	}
4367 
4368 	return 0;
4369 }
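
/*
 * A short usage sketch of the @how semantics, assuming the libc
 * sigprocmask() wrapper (modern libcs reach rt_sigprocmask instead,
 * but the SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK behaviour is the same):
 *
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *
 *		// SIG_BLOCK adds to the mask; the previous mask is saved in old.
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *
 *		// ... critical section that must not be interrupted by SIGINT ...
 *
 *		// SIG_SETMASK restores exactly the saved mask.
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */
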
4370 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4371 
4372 #ifndef CONFIG_ODD_RT_SIGACTION
4373 /**
4374  *  sys_rt_sigaction - alter an action taken by a process
4375  *  @sig: signal to be sent
4376  *  @act: new sigaction
4377  *  @oact: used to save the previous sigaction
4378  *  @sigsetsize: size of sigset_t type
4379  */
4380 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4381 		const struct sigaction __user *, act,
4382 		struct sigaction __user *, oact,
4383 		size_t, sigsetsize)
4384 {
4385 	struct k_sigaction new_sa, old_sa;
4386 	int ret;
4387 
4388 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4389 	if (sigsetsize != sizeof(sigset_t))
4390 		return -EINVAL;
4391 
4392 	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4393 		return -EFAULT;
4394 
4395 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4396 	if (ret)
4397 		return ret;
4398 
4399 	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4400 		return -EFAULT;
4401 
4402 	return 0;
4403 }
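
/*
 * A userspace sketch of a typical caller, assuming the libc sigaction()
 * wrapper: install a SIGCHLD handler with SA_SIGINFO so the handler can
 * see the si_pid/si_status fields filled in elsewhere in this file.
 * (Calling printf() from a handler is not async-signal-safe and is done
 * here only for brevity.)
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void on_child(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		printf("SIGCHLD from pid %d, status %d\n",
 *		       (int)info->si_pid, info->si_status);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = on_child;
 *		sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_NOCLDSTOP;
 *		sigemptyset(&sa.sa_mask);
 *
 *		if (sigaction(SIGCHLD, &sa, NULL) != 0)
 *			perror("sigaction");
 *		// ... fork children; the handler runs as each one exits ...
 *		return 0;
 *	}
 */
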
4404 #ifdef CONFIG_COMPAT
4405 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4406 		const struct compat_sigaction __user *, act,
4407 		struct compat_sigaction __user *, oact,
4408 		compat_size_t, sigsetsize)
4409 {
4410 	struct k_sigaction new_ka, old_ka;
4411 #ifdef __ARCH_HAS_SA_RESTORER
4412 	compat_uptr_t restorer;
4413 #endif
4414 	int ret;
4415 
4416 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4417 	if (sigsetsize != sizeof(compat_sigset_t))
4418 		return -EINVAL;
4419 
4420 	if (act) {
4421 		compat_uptr_t handler;
4422 		ret = get_user(handler, &act->sa_handler);
4423 		new_ka.sa.sa_handler = compat_ptr(handler);
4424 #ifdef __ARCH_HAS_SA_RESTORER
4425 		ret |= get_user(restorer, &act->sa_restorer);
4426 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4427 #endif
4428 		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4429 		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4430 		if (ret)
4431 			return -EFAULT;
4432 	}
4433 
4434 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4435 	if (!ret && oact) {
4436 		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4437 			       &oact->sa_handler);
4438 		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4439 					 sizeof(oact->sa_mask));
4440 		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4441 #ifdef __ARCH_HAS_SA_RESTORER
4442 		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4443 				&oact->sa_restorer);
4444 #endif
4445 	}
4446 	return ret;
4447 }
4448 #endif
4449 #endif /* !CONFIG_ODD_RT_SIGACTION */
4450 
4451 #ifdef CONFIG_OLD_SIGACTION
4452 SYSCALL_DEFINE3(sigaction, int, sig,
4453 		const struct old_sigaction __user *, act,
4454 	        struct old_sigaction __user *, oact)
4455 {
4456 	struct k_sigaction new_ka, old_ka;
4457 	int ret;
4458 
4459 	if (act) {
4460 		old_sigset_t mask;
4461 		if (!access_ok(act, sizeof(*act)) ||
4462 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4463 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4464 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4465 		    __get_user(mask, &act->sa_mask))
4466 			return -EFAULT;
4467 #ifdef __ARCH_HAS_KA_RESTORER
4468 		new_ka.ka_restorer = NULL;
4469 #endif
4470 		siginitset(&new_ka.sa.sa_mask, mask);
4471 	}
4472 
4473 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4474 
4475 	if (!ret && oact) {
4476 		if (!access_ok(oact, sizeof(*oact)) ||
4477 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4478 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4479 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4480 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4481 			return -EFAULT;
4482 	}
4483 
4484 	return ret;
4485 }
4486 #endif
4487 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4488 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4489 		const struct compat_old_sigaction __user *, act,
4490 	        struct compat_old_sigaction __user *, oact)
4491 {
4492 	struct k_sigaction new_ka, old_ka;
4493 	int ret;
4494 	compat_old_sigset_t mask;
4495 	compat_uptr_t handler, restorer;
4496 
4497 	if (act) {
4498 		if (!access_ok(act, sizeof(*act)) ||
4499 		    __get_user(handler, &act->sa_handler) ||
4500 		    __get_user(restorer, &act->sa_restorer) ||
4501 		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4502 		    __get_user(mask, &act->sa_mask))
4503 			return -EFAULT;
4504 
4505 #ifdef __ARCH_HAS_KA_RESTORER
4506 		new_ka.ka_restorer = NULL;
4507 #endif
4508 		new_ka.sa.sa_handler = compat_ptr(handler);
4509 		new_ka.sa.sa_restorer = compat_ptr(restorer);
4510 		siginitset(&new_ka.sa.sa_mask, mask);
4511 	}
4512 
4513 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4514 
4515 	if (!ret && oact) {
4516 		if (!access_ok(oact, sizeof(*oact)) ||
4517 		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4518 			       &oact->sa_handler) ||
4519 		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4520 			       &oact->sa_restorer) ||
4521 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4522 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4523 			return -EFAULT;
4524 	}
4525 	return ret;
4526 }
4527 #endif
4528 
4529 #ifdef CONFIG_SGETMASK_SYSCALL
4530 
4531 /*
4532  * For backwards compatibility.  Functionality superseded by sigprocmask.
4533  */
4534 SYSCALL_DEFINE0(sgetmask)
4535 {
4536 	/* SMP safe */
4537 	return current->blocked.sig[0];
4538 }
4539 
4540 SYSCALL_DEFINE1(ssetmask, int, newmask)
4541 {
4542 	int old = current->blocked.sig[0];
4543 	sigset_t newset;
4544 
4545 	siginitset(&newset, newmask);
4546 	set_current_blocked(&newset);
4547 
4548 	return old;
4549 }
4550 #endif /* CONFIG_SGETMASK_SYSCALL */
4551 
4552 #ifdef __ARCH_WANT_SYS_SIGNAL
4553 /*
4554  * For backwards compatibility.  Functionality superseded by sigaction.
4555  */
4556 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4557 {
4558 	struct k_sigaction new_sa, old_sa;
4559 	int ret;
4560 
4561 	new_sa.sa.sa_handler = handler;
4562 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4563 	sigemptyset(&new_sa.sa.sa_mask);
4564 
4565 	ret = do_sigaction(sig, &new_sa, &old_sa);
4566 
4567 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4568 }
4569 #endif /* __ARCH_WANT_SYS_SIGNAL */
4570 
4571 #ifdef __ARCH_WANT_SYS_PAUSE
4572 
4573 SYSCALL_DEFINE0(pause)
4574 {
4575 	while (!signal_pending(current)) {
4576 		__set_current_state(TASK_INTERRUPTIBLE);
4577 		schedule();
4578 	}
4579 	return -ERESTARTNOHAND;
4580 }
4581 
4582 #endif
4583 
4584 static int sigsuspend(sigset_t *set)
4585 {
4586 	current->saved_sigmask = current->blocked;
4587 	set_current_blocked(set);
4588 
4589 	while (!signal_pending(current)) {
4590 		__set_current_state(TASK_INTERRUPTIBLE);
4591 		schedule();
4592 	}
4593 	set_restore_sigmask();
4594 	return -ERESTARTNOHAND;
4595 }
4596 
4597 /**
4598  *  sys_rt_sigsuspend - replace the signal mask with the @unewset
4599  *	value until a signal is received
4600  *  @unewset: new signal mask value
4601  *  @sigsetsize: size of sigset_t type
4602  */
4603 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4604 {
4605 	sigset_t newset;
4606 
4607 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4608 	if (sigsetsize != sizeof(sigset_t))
4609 		return -EINVAL;
4610 
4611 	if (copy_from_user(&newset, unewset, sizeof(newset)))
4612 		return -EFAULT;
4613 	return sigsuspend(&newset);
4614 }
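
/*
 * A userspace sketch of the classic race-free wait that rt_sigsuspend
 * enables, assuming the libc sigsuspend() and sigprocmask() wrappers:
 * the signal is blocked while the flag is tested, and sigsuspend()
 * atomically unblocks it and sleeps, so a wakeup cannot be lost.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void on_usr1(int sig)
 *	{
 *		got_usr1 = 1;
 *	}
 *
 *	int main(void)
 *	{
 *		sigset_t block, orig;
 *
 *		signal(SIGUSR1, on_usr1);
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		// Block SIGUSR1 so it cannot fire between the test and the wait.
 *		sigprocmask(SIG_BLOCK, &block, &orig);
 *
 *		while (!got_usr1)
 *			sigsuspend(&orig);	// atomically unblock and sleep
 *
 *		sigprocmask(SIG_SETMASK, &orig, NULL);
 *		printf("got SIGUSR1\n");
 *		return 0;
 *	}
 */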
4615 
4616 #ifdef CONFIG_COMPAT
4617 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4618 {
4619 	sigset_t newset;
4620 
4621 	/* XXX: Don't preclude handling different sized sigset_t's.  */
4622 	if (sigsetsize != sizeof(sigset_t))
4623 		return -EINVAL;
4624 
4625 	if (get_compat_sigset(&newset, unewset))
4626 		return -EFAULT;
4627 	return sigsuspend(&newset);
4628 }
4629 #endif
4630 
4631 #ifdef CONFIG_OLD_SIGSUSPEND
4632 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4633 {
4634 	sigset_t blocked;
4635 	siginitset(&blocked, mask);
4636 	return sigsuspend(&blocked);
4637 }
4638 #endif
4639 #ifdef CONFIG_OLD_SIGSUSPEND3
4640 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4641 {
4642 	sigset_t blocked;
4643 	siginitset(&blocked, mask);
4644 	return sigsuspend(&blocked);
4645 }
4646 #endif
4647 
4648 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4649 {
4650 	return NULL;
4651 }
4652 
4653 static inline void siginfo_buildtime_checks(void)
4654 {
4655 	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4656 
4657 	/* Verify the offsets in the two siginfos match */
4658 #define CHECK_OFFSET(field) \
4659 	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4660 
4661 	/* kill */
4662 	CHECK_OFFSET(si_pid);
4663 	CHECK_OFFSET(si_uid);
4664 
4665 	/* timer */
4666 	CHECK_OFFSET(si_tid);
4667 	CHECK_OFFSET(si_overrun);
4668 	CHECK_OFFSET(si_value);
4669 
4670 	/* rt */
4671 	CHECK_OFFSET(si_pid);
4672 	CHECK_OFFSET(si_uid);
4673 	CHECK_OFFSET(si_value);
4674 
4675 	/* sigchld */
4676 	CHECK_OFFSET(si_pid);
4677 	CHECK_OFFSET(si_uid);
4678 	CHECK_OFFSET(si_status);
4679 	CHECK_OFFSET(si_utime);
4680 	CHECK_OFFSET(si_stime);
4681 
4682 	/* sigfault */
4683 	CHECK_OFFSET(si_addr);
4684 	CHECK_OFFSET(si_trapno);
4685 	CHECK_OFFSET(si_addr_lsb);
4686 	CHECK_OFFSET(si_lower);
4687 	CHECK_OFFSET(si_upper);
4688 	CHECK_OFFSET(si_pkey);
4689 	CHECK_OFFSET(si_perf_data);
4690 	CHECK_OFFSET(si_perf_type);
4691 
4692 	/* sigpoll */
4693 	CHECK_OFFSET(si_band);
4694 	CHECK_OFFSET(si_fd);
4695 
4696 	/* sigsys */
4697 	CHECK_OFFSET(si_call_addr);
4698 	CHECK_OFFSET(si_syscall);
4699 	CHECK_OFFSET(si_arch);
4700 #undef CHECK_OFFSET
4701 
4702 	/* usb asyncio */
4703 	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4704 		     offsetof(struct siginfo, si_addr));
4705 	if (sizeof(int) == sizeof(void __user *)) {
4706 		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4707 			     sizeof(void __user *));
4708 	} else {
4709 		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4710 			      sizeof_field(struct siginfo, si_uid)) !=
4711 			     sizeof(void __user *));
4712 		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4713 			     offsetof(struct siginfo, si_uid));
4714 	}
4715 #ifdef CONFIG_COMPAT
4716 	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4717 		     offsetof(struct compat_siginfo, si_addr));
4718 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4719 		     sizeof(compat_uptr_t));
4720 	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4721 		     sizeof_field(struct siginfo, si_pid));
4722 #endif
4723 }
4724 
4725 void __init signals_init(void)
4726 {
4727 	siginfo_buildtime_checks();
4728 
4729 	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4730 }
4731 
4732 #ifdef CONFIG_KGDB_KDB
4733 #include <linux/kdb.h>
4734 /*
4735  * kdb_send_sig - Allows kdb to send signals without exposing
4736  * signal internals.  This function checks if the required locks are
4737  * available before calling the main signal code, to avoid kdb
4738  * deadlocks.
4739  */
4740 void kdb_send_sig(struct task_struct *t, int sig)
4741 {
4742 	static struct task_struct *kdb_prev_t;
4743 	int new_t, ret;
4744 	if (!spin_trylock(&t->sighand->siglock)) {
4745 		kdb_printf("Can't do kill command now.\n"
4746 			   "The sigmask lock is held elsewhere in the "
4747 			   "kernel, try again later\n");
4748 		return;
4749 	}
4750 	new_t = kdb_prev_t != t;
4751 	kdb_prev_t = t;
4752 	if (!task_is_running(t) && new_t) {
4753 		spin_unlock(&t->sighand->siglock);
4754 		kdb_printf("Process is not RUNNING, sending a signal from "
4755 			   "kdb risks deadlock\n"
4756 			   "on the run queue locks. "
4757 			   "The signal has _not_ been sent.\n"
4758 			   "Reissue the kill command if you want to risk "
4759 			   "the deadlock.\n");
4760 		return;
4761 	}
4762 	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4763 	spin_unlock(&t->sighand->siglock);
4764 	if (ret)
4765 		kdb_printf("Failed to deliver signal %d to process %d.\n",
4766 			   sig, t->pid);
4767 	else
4768 		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4769 }
4770 #endif	/* CONFIG_KGDB_KDB */
4771